Diffstat (limited to 'libsolidity/codegen')
-rw-r--r--  libsolidity/codegen/ArrayUtils.cpp          893
-rw-r--r--  libsolidity/codegen/ArrayUtils.h              5
-rw-r--r--  libsolidity/codegen/CompilerContext.cpp     157
-rw-r--r--  libsolidity/codegen/CompilerContext.h        61
-rw-r--r--  libsolidity/codegen/CompilerUtils.cpp       221
-rw-r--r--  libsolidity/codegen/CompilerUtils.h          44
-rw-r--r--  libsolidity/codegen/ContractCompiler.cpp    249
-rw-r--r--  libsolidity/codegen/ExpressionCompiler.cpp  380
-rw-r--r--  libsolidity/codegen/ExpressionCompiler.h      8
-rw-r--r--  libsolidity/codegen/LValue.cpp               11
10 files changed, 1273 insertions, 756 deletions
diff --git a/libsolidity/codegen/ArrayUtils.cpp b/libsolidity/codegen/ArrayUtils.cpp
index 2c982982..67ca22f1 100644
--- a/libsolidity/codegen/ArrayUtils.cpp
+++ b/libsolidity/codegen/ArrayUtils.cpp
@@ -25,7 +25,7 @@
#include <libsolidity/codegen/CompilerContext.h>
#include <libsolidity/codegen/CompilerUtils.h>
#include <libsolidity/ast/Types.h>
-#include <libsolidity/interface/Utils.h>
+#include <libsolidity/interface/Exceptions.h>
#include <libsolidity/codegen/LValue.h>
using namespace std;
@@ -40,9 +40,9 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
// stack layout: [source_ref] [source length] target_ref (top)
solAssert(_targetType.location() == DataLocation::Storage, "");
- IntegerType uint256(256);
- Type const* targetBaseType = _targetType.isByteArray() ? &uint256 : &(*_targetType.baseType());
- Type const* sourceBaseType = _sourceType.isByteArray() ? &uint256 : &(*_sourceType.baseType());
+ TypePointer uint256 = make_shared<IntegerType>(256);
+ TypePointer targetBaseType = _targetType.isByteArray() ? uint256 : _targetType.baseType();
+ TypePointer sourceBaseType = _sourceType.isByteArray() ? uint256 : _sourceType.baseType();
// TODO unroll loop for small sizes
@@ -70,202 +70,216 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
}
// stack: target_ref source_ref source_length
- m_context << Instruction::DUP3;
- // stack: target_ref source_ref source_length target_ref
- retrieveLength(_targetType);
- // stack: target_ref source_ref source_length target_ref target_length
- if (_targetType.isDynamicallySized())
- // store new target length
- if (!_targetType.isByteArray())
- // Otherwise, length will be stored below.
- m_context << Instruction::DUP3 << Instruction::DUP3 << Instruction::SSTORE;
- if (sourceBaseType->category() == Type::Category::Mapping)
- {
- solAssert(targetBaseType->category() == Type::Category::Mapping, "");
- solAssert(_sourceType.location() == DataLocation::Storage, "");
- // nothing to copy
- m_context
- << Instruction::POP << Instruction::POP
- << Instruction::POP << Instruction::POP;
- return;
- }
- // stack: target_ref source_ref source_length target_ref target_length
- // compute hashes (data positions)
- m_context << Instruction::SWAP1;
- if (_targetType.isDynamicallySized())
- CompilerUtils(m_context).computeHashStatic();
- // stack: target_ref source_ref source_length target_length target_data_pos
- m_context << Instruction::SWAP1;
- convertLengthToSize(_targetType);
- m_context << Instruction::DUP2 << Instruction::ADD;
- // stack: target_ref source_ref source_length target_data_pos target_data_end
- m_context << Instruction::SWAP3;
- // stack: target_ref target_data_end source_length target_data_pos source_ref
-
- eth::AssemblyItem copyLoopEndWithoutByteOffset = m_context.newTag();
-
- // special case for short byte arrays: Store them together with their length.
- if (_targetType.isByteArray())
- {
- // stack: target_ref target_data_end source_length target_data_pos source_ref
- m_context << Instruction::DUP3 << u256(31) << Instruction::LT;
- eth::AssemblyItem longByteArray = m_context.appendConditionalJump();
- // store the short byte array
- solAssert(_sourceType.isByteArray(), "");
- if (_sourceType.location() == DataLocation::Storage)
- {
- // just copy the slot, it contains length and data
- m_context << Instruction::DUP1 << Instruction::SLOAD;
- m_context << Instruction::DUP6 << Instruction::SSTORE;
- }
- else
+ TypePointer targetType = _targetType.shared_from_this();
+ TypePointer sourceType = _sourceType.shared_from_this();
+ m_context.callLowLevelFunction(
+ "$copyArrayToStorage_" + sourceType->identifier() + "_to_" + targetType->identifier(),
+ 3,
+ 1,
+ [=](CompilerContext& _context)
{
- m_context << Instruction::DUP1;
- CompilerUtils(m_context).loadFromMemoryDynamic(*sourceBaseType, fromCalldata, true, false);
- // stack: target_ref target_data_end source_length target_data_pos source_ref value
- // clear the lower-order byte - which will hold the length
- m_context << u256(0xff) << Instruction::NOT << Instruction::AND;
- // fetch the length and shift it left by one
- m_context << Instruction::DUP4 << Instruction::DUP1 << Instruction::ADD;
- // combine value and length and store them
- m_context << Instruction::OR << Instruction::DUP6 << Instruction::SSTORE;
- }
- // end of special case, jump right into cleaning target data area
- m_context.appendJumpTo(copyLoopEndWithoutByteOffset);
- m_context << longByteArray;
- // Store length (2*length+1)
- m_context << Instruction::DUP3 << Instruction::DUP1 << Instruction::ADD;
- m_context << u256(1) << Instruction::ADD;
- m_context << Instruction::DUP6 << Instruction::SSTORE;
- }
+ ArrayUtils utils(_context);
+ ArrayType const& _sourceType = dynamic_cast<ArrayType const&>(*sourceType);
+ ArrayType const& _targetType = dynamic_cast<ArrayType const&>(*targetType);
+ // stack: target_ref source_ref source_length
+ _context << Instruction::DUP3;
+ // stack: target_ref source_ref source_length target_ref
+ utils.retrieveLength(_targetType);
+ // stack: target_ref source_ref source_length target_ref target_length
+ if (_targetType.isDynamicallySized())
+ // store new target length
+ if (!_targetType.isByteArray())
+ // Otherwise, length will be stored below.
+ _context << Instruction::DUP3 << Instruction::DUP3 << Instruction::SSTORE;
+ if (sourceBaseType->category() == Type::Category::Mapping)
+ {
+ solAssert(targetBaseType->category() == Type::Category::Mapping, "");
+ solAssert(_sourceType.location() == DataLocation::Storage, "");
+ // nothing to copy
+ _context
+ << Instruction::POP << Instruction::POP
+ << Instruction::POP << Instruction::POP;
+ return;
+ }
+ // stack: target_ref source_ref source_length target_ref target_length
+ // compute hashes (data positions)
+ _context << Instruction::SWAP1;
+ if (_targetType.isDynamicallySized())
+ CompilerUtils(_context).computeHashStatic();
+ // stack: target_ref source_ref source_length target_length target_data_pos
+ _context << Instruction::SWAP1;
+ utils.convertLengthToSize(_targetType);
+ _context << Instruction::DUP2 << Instruction::ADD;
+ // stack: target_ref source_ref source_length target_data_pos target_data_end
+ _context << Instruction::SWAP3;
+ // stack: target_ref target_data_end source_length target_data_pos source_ref
+
+ eth::AssemblyItem copyLoopEndWithoutByteOffset = _context.newTag();
+
+ // special case for short byte arrays: Store them together with their length.
+ if (_targetType.isByteArray())
+ {
+ // stack: target_ref target_data_end source_length target_data_pos source_ref
+ _context << Instruction::DUP3 << u256(31) << Instruction::LT;
+ eth::AssemblyItem longByteArray = _context.appendConditionalJump();
+ // store the short byte array
+ solAssert(_sourceType.isByteArray(), "");
+ if (_sourceType.location() == DataLocation::Storage)
+ {
+ // just copy the slot, it contains length and data
+ _context << Instruction::DUP1 << Instruction::SLOAD;
+ _context << Instruction::DUP6 << Instruction::SSTORE;
+ }
+ else
+ {
+ _context << Instruction::DUP1;
+ CompilerUtils(_context).loadFromMemoryDynamic(*sourceBaseType, fromCalldata, true, false);
+ // stack: target_ref target_data_end source_length target_data_pos source_ref value
+ // clear the lower-order byte - which will hold the length
+ _context << u256(0xff) << Instruction::NOT << Instruction::AND;
+ // fetch the length and shift it left by one
+ _context << Instruction::DUP4 << Instruction::DUP1 << Instruction::ADD;
+ // combine value and length and store them
+ _context << Instruction::OR << Instruction::DUP6 << Instruction::SSTORE;
+ }
+ // end of special case, jump right into cleaning target data area
+ _context.appendJumpTo(copyLoopEndWithoutByteOffset);
+ _context << longByteArray;
+ // Store length (2*length+1)
+ _context << Instruction::DUP3 << Instruction::DUP1 << Instruction::ADD;
+ _context << u256(1) << Instruction::ADD;
+ _context << Instruction::DUP6 << Instruction::SSTORE;
+ }
- // skip copying if source length is zero
- m_context << Instruction::DUP3 << Instruction::ISZERO;
- m_context.appendConditionalJumpTo(copyLoopEndWithoutByteOffset);
-
- if (_sourceType.location() == DataLocation::Storage && _sourceType.isDynamicallySized())
- CompilerUtils(m_context).computeHashStatic();
- // stack: target_ref target_data_end source_length target_data_pos source_data_pos
- m_context << Instruction::SWAP2;
- convertLengthToSize(_sourceType);
- m_context << Instruction::DUP3 << Instruction::ADD;
- // stack: target_ref target_data_end source_data_pos target_data_pos source_data_end
- if (haveByteOffsetTarget)
- m_context << u256(0);
- if (haveByteOffsetSource)
- m_context << u256(0);
- // stack: target_ref target_data_end source_data_pos target_data_pos source_data_end [target_byte_offset] [source_byte_offset]
- eth::AssemblyItem copyLoopStart = m_context.newTag();
- m_context << copyLoopStart;
- // check for loop condition
- m_context
- << dupInstruction(3 + byteOffsetSize) << dupInstruction(2 + byteOffsetSize)
- << Instruction::GT << Instruction::ISZERO;
- eth::AssemblyItem copyLoopEnd = m_context.appendConditionalJump();
- // stack: target_ref target_data_end source_data_pos target_data_pos source_data_end [target_byte_offset] [source_byte_offset]
- // copy
- if (sourceBaseType->category() == Type::Category::Array)
- {
- solAssert(byteOffsetSize == 0, "Byte offset for array as base type.");
- auto const& sourceBaseArrayType = dynamic_cast<ArrayType const&>(*sourceBaseType);
- m_context << Instruction::DUP3;
- if (sourceBaseArrayType.location() == DataLocation::Memory)
- m_context << Instruction::MLOAD;
- m_context << Instruction::DUP3;
- copyArrayToStorage(dynamic_cast<ArrayType const&>(*targetBaseType), sourceBaseArrayType);
- m_context << Instruction::POP;
- }
- else if (directCopy)
- {
- solAssert(byteOffsetSize == 0, "Byte offset for direct copy.");
- m_context
- << Instruction::DUP3 << Instruction::SLOAD
- << Instruction::DUP3 << Instruction::SSTORE;
- }
- else
- {
- // Note that we have to copy each element on its own in case conversion is involved.
- // We might copy too much if there is padding at the last element, but this way end
- // checking is easier.
- // stack: target_ref target_data_end source_data_pos target_data_pos source_data_end [target_byte_offset] [source_byte_offset]
- m_context << dupInstruction(3 + byteOffsetSize);
- if (_sourceType.location() == DataLocation::Storage)
- {
+ // skip copying if source length is zero
+ _context << Instruction::DUP3 << Instruction::ISZERO;
+ _context.appendConditionalJumpTo(copyLoopEndWithoutByteOffset);
+
+ if (_sourceType.location() == DataLocation::Storage && _sourceType.isDynamicallySized())
+ CompilerUtils(_context).computeHashStatic();
+ // stack: target_ref target_data_end source_length target_data_pos source_data_pos
+ _context << Instruction::SWAP2;
+ utils.convertLengthToSize(_sourceType);
+ _context << Instruction::DUP3 << Instruction::ADD;
+ // stack: target_ref target_data_end source_data_pos target_data_pos source_data_end
+ if (haveByteOffsetTarget)
+ _context << u256(0);
if (haveByteOffsetSource)
- m_context << Instruction::DUP2;
+ _context << u256(0);
+ // stack: target_ref target_data_end source_data_pos target_data_pos source_data_end [target_byte_offset] [source_byte_offset]
+ eth::AssemblyItem copyLoopStart = _context.newTag();
+ _context << copyLoopStart;
+ // check for loop condition
+ _context
+ << dupInstruction(3 + byteOffsetSize) << dupInstruction(2 + byteOffsetSize)
+ << Instruction::GT << Instruction::ISZERO;
+ eth::AssemblyItem copyLoopEnd = _context.appendConditionalJump();
+ // stack: target_ref target_data_end source_data_pos target_data_pos source_data_end [target_byte_offset] [source_byte_offset]
+ // copy
+ if (sourceBaseType->category() == Type::Category::Array)
+ {
+ solAssert(byteOffsetSize == 0, "Byte offset for array as base type.");
+ auto const& sourceBaseArrayType = dynamic_cast<ArrayType const&>(*sourceBaseType);
+ _context << Instruction::DUP3;
+ if (sourceBaseArrayType.location() == DataLocation::Memory)
+ _context << Instruction::MLOAD;
+ _context << Instruction::DUP3;
+ utils.copyArrayToStorage(dynamic_cast<ArrayType const&>(*targetBaseType), sourceBaseArrayType);
+ _context << Instruction::POP;
+ }
+ else if (directCopy)
+ {
+ solAssert(byteOffsetSize == 0, "Byte offset for direct copy.");
+ _context
+ << Instruction::DUP3 << Instruction::SLOAD
+ << Instruction::DUP3 << Instruction::SSTORE;
+ }
+ else
+ {
+ // Note that we have to copy each element on its own in case conversion is involved.
+ // We might copy too much if there is padding at the last element, but this way end
+ // checking is easier.
+ // stack: target_ref target_data_end source_data_pos target_data_pos source_data_end [target_byte_offset] [source_byte_offset]
+ _context << dupInstruction(3 + byteOffsetSize);
+ if (_sourceType.location() == DataLocation::Storage)
+ {
+ if (haveByteOffsetSource)
+ _context << Instruction::DUP2;
+ else
+ _context << u256(0);
+ StorageItem(_context, *sourceBaseType).retrieveValue(SourceLocation(), true);
+ }
+ else if (sourceBaseType->isValueType())
+ CompilerUtils(_context).loadFromMemoryDynamic(*sourceBaseType, fromCalldata, true, false);
+ else
+ solUnimplemented("Copying of type " + _sourceType.toString(false) + " to storage not yet supported.");
+ // stack: target_ref target_data_end source_data_pos target_data_pos source_data_end [target_byte_offset] [source_byte_offset] <source_value>...
+ solAssert(
+ 2 + byteOffsetSize + sourceBaseType->sizeOnStack() <= 16,
+ "Stack too deep, try removing local variables."
+ );
+ // fetch target storage reference
+ _context << dupInstruction(2 + byteOffsetSize + sourceBaseType->sizeOnStack());
+ if (haveByteOffsetTarget)
+ _context << dupInstruction(1 + byteOffsetSize + sourceBaseType->sizeOnStack());
+ else
+ _context << u256(0);
+ StorageItem(_context, *targetBaseType).storeValue(*sourceBaseType, SourceLocation(), true);
+ }
+ // stack: target_ref target_data_end source_data_pos target_data_pos source_data_end [target_byte_offset] [source_byte_offset]
+ // increment source
+ if (haveByteOffsetSource)
+ utils.incrementByteOffset(sourceBaseType->storageBytes(), 1, haveByteOffsetTarget ? 5 : 4);
+ else
+ {
+ _context << swapInstruction(2 + byteOffsetSize);
+ if (sourceIsStorage)
+ _context << sourceBaseType->storageSize();
+ else if (_sourceType.location() == DataLocation::Memory)
+ _context << sourceBaseType->memoryHeadSize();
+ else
+ _context << sourceBaseType->calldataEncodedSize(true);
+ _context
+ << Instruction::ADD
+ << swapInstruction(2 + byteOffsetSize);
+ }
+ // increment target
+ if (haveByteOffsetTarget)
+ utils.incrementByteOffset(targetBaseType->storageBytes(), byteOffsetSize, byteOffsetSize + 2);
else
- m_context << u256(0);
- StorageItem(m_context, *sourceBaseType).retrieveValue(SourceLocation(), true);
+ _context
+ << swapInstruction(1 + byteOffsetSize)
+ << targetBaseType->storageSize()
+ << Instruction::ADD
+ << swapInstruction(1 + byteOffsetSize);
+ _context.appendJumpTo(copyLoopStart);
+ _context << copyLoopEnd;
+ if (haveByteOffsetTarget)
+ {
+ // clear elements that might be left over in the current slot in target
+ // stack: target_ref target_data_end source_data_pos target_data_pos source_data_end target_byte_offset [source_byte_offset]
+ _context << dupInstruction(byteOffsetSize) << Instruction::ISZERO;
+ eth::AssemblyItem copyCleanupLoopEnd = _context.appendConditionalJump();
+ _context << dupInstruction(2 + byteOffsetSize) << dupInstruction(1 + byteOffsetSize);
+ StorageItem(_context, *targetBaseType).setToZero(SourceLocation(), true);
+ utils.incrementByteOffset(targetBaseType->storageBytes(), byteOffsetSize, byteOffsetSize + 2);
+ _context.appendJumpTo(copyLoopEnd);
+
+ _context << copyCleanupLoopEnd;
+ _context << Instruction::POP; // might pop the source, but then target is popped next
+ }
+ if (haveByteOffsetSource)
+ _context << Instruction::POP;
+ _context << copyLoopEndWithoutByteOffset;
+
+ // zero-out leftovers in target
+ // stack: target_ref target_data_end source_data_pos target_data_pos_updated source_data_end
+ _context << Instruction::POP << Instruction::SWAP1 << Instruction::POP;
+ // stack: target_ref target_data_end target_data_pos_updated
+ utils.clearStorageLoop(targetBaseType);
+ _context << Instruction::POP;
}
- else if (sourceBaseType->isValueType())
- CompilerUtils(m_context).loadFromMemoryDynamic(*sourceBaseType, fromCalldata, true, false);
- else
- solUnimplemented("Copying of type " + _sourceType.toString(false) + " to storage not yet supported.");
- // stack: target_ref target_data_end source_data_pos target_data_pos source_data_end [target_byte_offset] [source_byte_offset] <source_value>...
- solAssert(
- 2 + byteOffsetSize + sourceBaseType->sizeOnStack() <= 16,
- "Stack too deep, try removing local variables."
- );
- // fetch target storage reference
- m_context << dupInstruction(2 + byteOffsetSize + sourceBaseType->sizeOnStack());
- if (haveByteOffsetTarget)
- m_context << dupInstruction(1 + byteOffsetSize + sourceBaseType->sizeOnStack());
- else
- m_context << u256(0);
- StorageItem(m_context, *targetBaseType).storeValue(*sourceBaseType, SourceLocation(), true);
- }
- // stack: target_ref target_data_end source_data_pos target_data_pos source_data_end [target_byte_offset] [source_byte_offset]
- // increment source
- if (haveByteOffsetSource)
- incrementByteOffset(sourceBaseType->storageBytes(), 1, haveByteOffsetTarget ? 5 : 4);
- else
- {
- m_context << swapInstruction(2 + byteOffsetSize);
- if (sourceIsStorage)
- m_context << sourceBaseType->storageSize();
- else if (_sourceType.location() == DataLocation::Memory)
- m_context << sourceBaseType->memoryHeadSize();
- else
- m_context << sourceBaseType->calldataEncodedSize(true);
- m_context
- << Instruction::ADD
- << swapInstruction(2 + byteOffsetSize);
- }
- // increment target
- if (haveByteOffsetTarget)
- incrementByteOffset(targetBaseType->storageBytes(), byteOffsetSize, byteOffsetSize + 2);
- else
- m_context
- << swapInstruction(1 + byteOffsetSize)
- << targetBaseType->storageSize()
- << Instruction::ADD
- << swapInstruction(1 + byteOffsetSize);
- m_context.appendJumpTo(copyLoopStart);
- m_context << copyLoopEnd;
- if (haveByteOffsetTarget)
- {
- // clear elements that might be left over in the current slot in target
- // stack: target_ref target_data_end source_data_pos target_data_pos source_data_end target_byte_offset [source_byte_offset]
- m_context << dupInstruction(byteOffsetSize) << Instruction::ISZERO;
- eth::AssemblyItem copyCleanupLoopEnd = m_context.appendConditionalJump();
- m_context << dupInstruction(2 + byteOffsetSize) << dupInstruction(1 + byteOffsetSize);
- StorageItem(m_context, *targetBaseType).setToZero(SourceLocation(), true);
- incrementByteOffset(targetBaseType->storageBytes(), byteOffsetSize, byteOffsetSize + 2);
- m_context.appendJumpTo(copyLoopEnd);
-
- m_context << copyCleanupLoopEnd;
- m_context << Instruction::POP; // might pop the source, but then target is popped next
- }
- if (haveByteOffsetSource)
- m_context << Instruction::POP;
- m_context << copyLoopEndWithoutByteOffset;
-
- // zero-out leftovers in target
- // stack: target_ref target_data_end source_data_pos target_data_pos_updated source_data_end
- m_context << Instruction::POP << Instruction::SWAP1 << Instruction::POP;
- // stack: target_ref target_data_end target_data_pos_updated
- clearStorageLoop(*targetBaseType);
- m_context << Instruction::POP;
+ );
}
void ArrayUtils::copyArrayToMemory(ArrayType const& _sourceType, bool _padToWordBoundaries) const
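The rewritten copyArrayToStorage above hands its body to CompilerContext::callLowLevelFunction as a generator lambda and captures the source and target types as shared TypePointers (via shared_from_this()) instead of references, because the generator may only run later, when the queued low-level functions are emitted. A minimal, self-contained sketch of why shared ownership matters for such deferred generators; the TypeInfo struct, the queue, and the type identifier strings are illustrative stand-ins, not compiler API:

#include <functional>
#include <iostream>
#include <memory>
#include <queue>
#include <string>

// Stand-in for a type descriptor; the real code captures TypePointer
// (std::shared_ptr<Type const>) obtained via shared_from_this().
struct TypeInfo { std::string identifier; };

int main()
{
    std::queue<std::function<void()>> generationQueue;

    {
        auto sourceType = std::make_shared<TypeInfo>(TypeInfo{"uint256_array_memory"});
        auto targetType = std::make_shared<TypeInfo>(TypeInfo{"uint256_array_storage"});
        // Capture by value: the lambda shares ownership, so the descriptors
        // stay alive even after this scope (and any references) are gone.
        generationQueue.push([=]() {
            std::cout << "$copyArrayToStorage_" << sourceType->identifier
                      << "_to_" << targetType->identifier << "\n";
        });
    } // the local shared_ptr variables are destroyed here

    // Deferred generation, analogous to appendMissingLowLevelFunctions().
    while (!generationQueue.empty())
    {
        generationQueue.front()();
        generationQueue.pop();
    }
}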
@@ -335,9 +349,14 @@ void ArrayUtils::copyArrayToMemory(ArrayType const& _sourceType, bool _padToWord
if (baseSize > 1)
m_context << u256(baseSize) << Instruction::MUL;
// stack: <target> <source> <size>
- //@TODO do not use ::CALL if less than 32 bytes?
m_context << Instruction::DUP1 << Instruction::DUP4 << Instruction::DUP4;
- utils.memoryCopy();
+ // We can resort to copying full 32 bytes only if
+ // - the length is known to be a multiple of 32 or
+ // - we will pad to full 32 bytes later anyway.
+ if (((baseSize % 32) == 0) || _padToWordBoundaries)
+ utils.memoryCopy32();
+ else
+ utils.memoryCopy();
m_context << Instruction::SWAP1 << Instruction::POP;
// stack: <target> <size>
@@ -430,7 +449,7 @@ void ArrayUtils::copyArrayToMemory(ArrayType const& _sourceType, bool _padToWord
m_context << Instruction::DUP3 << Instruction::ADD << Instruction::SWAP2;
if (_sourceType.isDynamicallySized())
{
- // actual array data is stored at SHA3(storage_offset)
+ // actual array data is stored at KECCAK256(storage_offset)
m_context << Instruction::SWAP1;
utils.computeHashStatic();
m_context << Instruction::SWAP1;
@@ -497,60 +516,70 @@ void ArrayUtils::copyArrayToMemory(ArrayType const& _sourceType, bool _padToWord
}
}
-void ArrayUtils::clearArray(ArrayType const& _type) const
+void ArrayUtils::clearArray(ArrayType const& _typeIn) const
{
- unsigned stackHeightStart = m_context.stackHeight();
- solAssert(_type.location() == DataLocation::Storage, "");
- if (_type.baseType()->storageBytes() < 32)
- {
- solAssert(_type.baseType()->isValueType(), "Invalid storage size for non-value type.");
- solAssert(_type.baseType()->storageSize() <= 1, "Invalid storage size for type.");
- }
- if (_type.baseType()->isValueType())
- solAssert(_type.baseType()->storageSize() <= 1, "Invalid size for value type.");
-
- m_context << Instruction::POP; // remove byte offset
- if (_type.isDynamicallySized())
- clearDynamicArray(_type);
- else if (_type.length() == 0 || _type.baseType()->category() == Type::Category::Mapping)
- m_context << Instruction::POP;
- else if (_type.baseType()->isValueType() && _type.storageSize() <= 5)
- {
- // unroll loop for small arrays @todo choose a good value
- // Note that we loop over storage slots here, not elements.
- for (unsigned i = 1; i < _type.storageSize(); ++i)
- m_context
- << u256(0) << Instruction::DUP2 << Instruction::SSTORE
- << u256(1) << Instruction::ADD;
- m_context << u256(0) << Instruction::SWAP1 << Instruction::SSTORE;
- }
- else if (!_type.baseType()->isValueType() && _type.length() <= 4)
- {
- // unroll loop for small arrays @todo choose a good value
- solAssert(_type.baseType()->storageBytes() >= 32, "Invalid storage size.");
- for (unsigned i = 1; i < _type.length(); ++i)
+ TypePointer type = _typeIn.shared_from_this();
+ m_context.callLowLevelFunction(
+ "$clearArray_" + _typeIn.identifier(),
+ 2,
+ 0,
+ [type](CompilerContext& _context)
{
- m_context << u256(0);
- StorageItem(m_context, *_type.baseType()).setToZero(SourceLocation(), false);
- m_context
- << Instruction::POP
- << u256(_type.baseType()->storageSize()) << Instruction::ADD;
+ ArrayType const& _type = dynamic_cast<ArrayType const&>(*type);
+ unsigned stackHeightStart = _context.stackHeight();
+ solAssert(_type.location() == DataLocation::Storage, "");
+ if (_type.baseType()->storageBytes() < 32)
+ {
+ solAssert(_type.baseType()->isValueType(), "Invalid storage size for non-value type.");
+ solAssert(_type.baseType()->storageSize() <= 1, "Invalid storage size for type.");
+ }
+ if (_type.baseType()->isValueType())
+ solAssert(_type.baseType()->storageSize() <= 1, "Invalid size for value type.");
+
+ _context << Instruction::POP; // remove byte offset
+ if (_type.isDynamicallySized())
+ ArrayUtils(_context).clearDynamicArray(_type);
+ else if (_type.length() == 0 || _type.baseType()->category() == Type::Category::Mapping)
+ _context << Instruction::POP;
+ else if (_type.baseType()->isValueType() && _type.storageSize() <= 5)
+ {
+ // unroll loop for small arrays @todo choose a good value
+ // Note that we loop over storage slots here, not elements.
+ for (unsigned i = 1; i < _type.storageSize(); ++i)
+ _context
+ << u256(0) << Instruction::DUP2 << Instruction::SSTORE
+ << u256(1) << Instruction::ADD;
+ _context << u256(0) << Instruction::SWAP1 << Instruction::SSTORE;
+ }
+ else if (!_type.baseType()->isValueType() && _type.length() <= 4)
+ {
+ // unroll loop for small arrays @todo choose a good value
+ solAssert(_type.baseType()->storageBytes() >= 32, "Invalid storage size.");
+ for (unsigned i = 1; i < _type.length(); ++i)
+ {
+ _context << u256(0);
+ StorageItem(_context, *_type.baseType()).setToZero(SourceLocation(), false);
+ _context
+ << Instruction::POP
+ << u256(_type.baseType()->storageSize()) << Instruction::ADD;
+ }
+ _context << u256(0);
+ StorageItem(_context, *_type.baseType()).setToZero(SourceLocation(), true);
+ }
+ else
+ {
+ _context << Instruction::DUP1 << _type.length();
+ ArrayUtils(_context).convertLengthToSize(_type);
+ _context << Instruction::ADD << Instruction::SWAP1;
+ if (_type.baseType()->storageBytes() < 32)
+ ArrayUtils(_context).clearStorageLoop(make_shared<IntegerType>(256));
+ else
+ ArrayUtils(_context).clearStorageLoop(_type.baseType());
+ _context << Instruction::POP;
+ }
+ solAssert(_context.stackHeight() == stackHeightStart - 2, "");
}
- m_context << u256(0);
- StorageItem(m_context, *_type.baseType()).setToZero(SourceLocation(), true);
- }
- else
- {
- m_context << Instruction::DUP1 << _type.length();
- convertLengthToSize(_type);
- m_context << Instruction::ADD << Instruction::SWAP1;
- if (_type.baseType()->storageBytes() < 32)
- clearStorageLoop(IntegerType(256));
- else
- clearStorageLoop(*_type.baseType());
- m_context << Instruction::POP;
- }
- solAssert(m_context.stackHeight() == stackHeightStart - 2, "");
+ );
}
void ArrayUtils::clearDynamicArray(ArrayType const& _type) const
@@ -584,191 +613,209 @@ void ArrayUtils::clearDynamicArray(ArrayType const& _type) const
<< Instruction::SWAP1;
// stack: data_pos_end data_pos
if (_type.isByteArray() || _type.baseType()->storageBytes() < 32)
- clearStorageLoop(IntegerType(256));
+ clearStorageLoop(make_shared<IntegerType>(256));
else
- clearStorageLoop(*_type.baseType());
+ clearStorageLoop(_type.baseType());
// cleanup
m_context << endTag;
m_context << Instruction::POP;
}
-void ArrayUtils::resizeDynamicArray(ArrayType const& _type) const
+void ArrayUtils::resizeDynamicArray(ArrayType const& _typeIn) const
{
- solAssert(_type.location() == DataLocation::Storage, "");
- solAssert(_type.isDynamicallySized(), "");
- if (!_type.isByteArray() && _type.baseType()->storageBytes() < 32)
- solAssert(_type.baseType()->isValueType(), "Invalid storage size for non-value type.");
-
- unsigned stackHeightStart = m_context.stackHeight();
- eth::AssemblyItem resizeEnd = m_context.newTag();
-
- // stack: ref new_length
- // fetch old length
- retrieveLength(_type, 1);
- // stack: ref new_length old_length
- solAssert(m_context.stackHeight() - stackHeightStart == 3 - 2, "2");
-
- // Special case for short byte arrays, they are stored together with their length
- if (_type.isByteArray())
- {
- eth::AssemblyItem regularPath = m_context.newTag();
- // We start by a large case-distinction about the old and new length of the byte array.
-
- m_context << Instruction::DUP3 << Instruction::SLOAD;
- // stack: ref new_length current_length ref_value
-
- solAssert(m_context.stackHeight() - stackHeightStart == 4 - 2, "3");
- m_context << Instruction::DUP2 << u256(31) << Instruction::LT;
- eth::AssemblyItem currentIsLong = m_context.appendConditionalJump();
- m_context << Instruction::DUP3 << u256(31) << Instruction::LT;
- eth::AssemblyItem newIsLong = m_context.appendConditionalJump();
-
- // Here: short -> short
-
- // Compute 1 << (256 - 8 * new_size)
- eth::AssemblyItem shortToShort = m_context.newTag();
- m_context << shortToShort;
- m_context << Instruction::DUP3 << u256(8) << Instruction::MUL;
- m_context << u256(0x100) << Instruction::SUB;
- m_context << u256(2) << Instruction::EXP;
- // Divide and multiply by that value, clearing bits.
- m_context << Instruction::DUP1 << Instruction::SWAP2;
- m_context << Instruction::DIV << Instruction::MUL;
- // Insert 2*length.
- m_context << Instruction::DUP3 << Instruction::DUP1 << Instruction::ADD;
- m_context << Instruction::OR;
- // Store.
- m_context << Instruction::DUP4 << Instruction::SSTORE;
- solAssert(m_context.stackHeight() - stackHeightStart == 3 - 2, "3");
- m_context.appendJumpTo(resizeEnd);
-
- m_context.adjustStackOffset(1); // we have to do that because of the jumps
- // Here: short -> long
-
- m_context << newIsLong;
- // stack: ref new_length current_length ref_value
- solAssert(m_context.stackHeight() - stackHeightStart == 4 - 2, "3");
- // Zero out lower-order byte.
- m_context << u256(0xff) << Instruction::NOT << Instruction::AND;
- // Store at data location.
- m_context << Instruction::DUP4;
- CompilerUtils(m_context).computeHashStatic();
- m_context << Instruction::SSTORE;
- // stack: ref new_length current_length
- // Store new length: Compute 2*length + 1 and store it.
- m_context << Instruction::DUP2 << Instruction::DUP1 << Instruction::ADD;
- m_context << u256(1) << Instruction::ADD;
- // stack: ref new_length current_length 2*new_length+1
- m_context << Instruction::DUP4 << Instruction::SSTORE;
- solAssert(m_context.stackHeight() - stackHeightStart == 3 - 2, "3");
- m_context.appendJumpTo(resizeEnd);
-
- m_context.adjustStackOffset(1); // we have to do that because of the jumps
-
- m_context << currentIsLong;
- m_context << Instruction::DUP3 << u256(31) << Instruction::LT;
- m_context.appendConditionalJumpTo(regularPath);
-
- // Here: long -> short
- // Read the first word of the data and store it on the stack. Clear the data location and
- // then jump to the short -> short case.
-
- // stack: ref new_length current_length ref_value
- solAssert(m_context.stackHeight() - stackHeightStart == 4 - 2, "3");
- m_context << Instruction::POP << Instruction::DUP3;
- CompilerUtils(m_context).computeHashStatic();
- m_context << Instruction::DUP1 << Instruction::SLOAD << Instruction::SWAP1;
- // stack: ref new_length current_length first_word data_location
- m_context << Instruction::DUP3;
- convertLengthToSize(_type);
- m_context << Instruction::DUP2 << Instruction::ADD << Instruction::SWAP1;
- // stack: ref new_length current_length first_word data_location_end data_location
- clearStorageLoop(IntegerType(256));
- m_context << Instruction::POP;
- // stack: ref new_length current_length first_word
- solAssert(m_context.stackHeight() - stackHeightStart == 4 - 2, "3");
- m_context.appendJumpTo(shortToShort);
-
- m_context << regularPath;
- // stack: ref new_length current_length ref_value
- m_context << Instruction::POP;
- }
+ TypePointer type = _typeIn.shared_from_this();
+ m_context.callLowLevelFunction(
+ "$resizeDynamicArray_" + _typeIn.identifier(),
+ 2,
+ 0,
+ [type](CompilerContext& _context)
+ {
+ ArrayType const& _type = dynamic_cast<ArrayType const&>(*type);
+ solAssert(_type.location() == DataLocation::Storage, "");
+ solAssert(_type.isDynamicallySized(), "");
+ if (!_type.isByteArray() && _type.baseType()->storageBytes() < 32)
+ solAssert(_type.baseType()->isValueType(), "Invalid storage size for non-value type.");
+
+ unsigned stackHeightStart = _context.stackHeight();
+ eth::AssemblyItem resizeEnd = _context.newTag();
+
+ // stack: ref new_length
+ // fetch old length
+ ArrayUtils(_context).retrieveLength(_type, 1);
+ // stack: ref new_length old_length
+ solAssert(_context.stackHeight() - stackHeightStart == 3 - 2, "2");
+
+ // Special case for short byte arrays, they are stored together with their length
+ if (_type.isByteArray())
+ {
+ eth::AssemblyItem regularPath = _context.newTag();
+ // We start by a large case-distinction about the old and new length of the byte array.
+
+ _context << Instruction::DUP3 << Instruction::SLOAD;
+ // stack: ref new_length current_length ref_value
+
+ solAssert(_context.stackHeight() - stackHeightStart == 4 - 2, "3");
+ _context << Instruction::DUP2 << u256(31) << Instruction::LT;
+ eth::AssemblyItem currentIsLong = _context.appendConditionalJump();
+ _context << Instruction::DUP3 << u256(31) << Instruction::LT;
+ eth::AssemblyItem newIsLong = _context.appendConditionalJump();
+
+ // Here: short -> short
+
+ // Compute 1 << (256 - 8 * new_size)
+ eth::AssemblyItem shortToShort = _context.newTag();
+ _context << shortToShort;
+ _context << Instruction::DUP3 << u256(8) << Instruction::MUL;
+ _context << u256(0x100) << Instruction::SUB;
+ _context << u256(2) << Instruction::EXP;
+ // Divide and multiply by that value, clearing bits.
+ _context << Instruction::DUP1 << Instruction::SWAP2;
+ _context << Instruction::DIV << Instruction::MUL;
+ // Insert 2*length.
+ _context << Instruction::DUP3 << Instruction::DUP1 << Instruction::ADD;
+ _context << Instruction::OR;
+ // Store.
+ _context << Instruction::DUP4 << Instruction::SSTORE;
+ solAssert(_context.stackHeight() - stackHeightStart == 3 - 2, "3");
+ _context.appendJumpTo(resizeEnd);
+
+ _context.adjustStackOffset(1); // we have to do that because of the jumps
+ // Here: short -> long
+
+ _context << newIsLong;
+ // stack: ref new_length current_length ref_value
+ solAssert(_context.stackHeight() - stackHeightStart == 4 - 2, "3");
+ // Zero out lower-order byte.
+ _context << u256(0xff) << Instruction::NOT << Instruction::AND;
+ // Store at data location.
+ _context << Instruction::DUP4;
+ CompilerUtils(_context).computeHashStatic();
+ _context << Instruction::SSTORE;
+ // stack: ref new_length current_length
+ // Store new length: Compute 2*length + 1 and store it.
+ _context << Instruction::DUP2 << Instruction::DUP1 << Instruction::ADD;
+ _context << u256(1) << Instruction::ADD;
+ // stack: ref new_length current_length 2*new_length+1
+ _context << Instruction::DUP4 << Instruction::SSTORE;
+ solAssert(_context.stackHeight() - stackHeightStart == 3 - 2, "3");
+ _context.appendJumpTo(resizeEnd);
+
+ _context.adjustStackOffset(1); // we have to do that because of the jumps
+
+ _context << currentIsLong;
+ _context << Instruction::DUP3 << u256(31) << Instruction::LT;
+ _context.appendConditionalJumpTo(regularPath);
+
+ // Here: long -> short
+ // Read the first word of the data and store it on the stack. Clear the data location and
+ // then jump to the short -> short case.
+
+ // stack: ref new_length current_length ref_value
+ solAssert(_context.stackHeight() - stackHeightStart == 4 - 2, "3");
+ _context << Instruction::POP << Instruction::DUP3;
+ CompilerUtils(_context).computeHashStatic();
+ _context << Instruction::DUP1 << Instruction::SLOAD << Instruction::SWAP1;
+ // stack: ref new_length current_length first_word data_location
+ _context << Instruction::DUP3;
+ ArrayUtils(_context).convertLengthToSize(_type);
+ _context << Instruction::DUP2 << Instruction::ADD << Instruction::SWAP1;
+ // stack: ref new_length current_length first_word data_location_end data_location
+ ArrayUtils(_context).clearStorageLoop(make_shared<IntegerType>(256));
+ _context << Instruction::POP;
+ // stack: ref new_length current_length first_word
+ solAssert(_context.stackHeight() - stackHeightStart == 4 - 2, "3");
+ _context.appendJumpTo(shortToShort);
+
+ _context << regularPath;
+ // stack: ref new_length current_length ref_value
+ _context << Instruction::POP;
+ }
- // Change of length for a regular array (i.e. length at location, data at sha3(location)).
- // stack: ref new_length old_length
- // store new length
- m_context << Instruction::DUP2;
- if (_type.isByteArray())
- // For a "long" byte array, store length as 2*length+1
- m_context << Instruction::DUP1 << Instruction::ADD << u256(1) << Instruction::ADD;
- m_context<< Instruction::DUP4 << Instruction::SSTORE;
- // skip if size is not reduced
- m_context << Instruction::DUP2 << Instruction::DUP2
- << Instruction::ISZERO << Instruction::GT;
- m_context.appendConditionalJumpTo(resizeEnd);
-
- // size reduced, clear the end of the array
- // stack: ref new_length old_length
- convertLengthToSize(_type);
- m_context << Instruction::DUP2;
- convertLengthToSize(_type);
- // stack: ref new_length old_size new_size
- // compute data positions
- m_context << Instruction::DUP4;
- CompilerUtils(m_context).computeHashStatic();
- // stack: ref new_length old_size new_size data_pos
- m_context << Instruction::SWAP2 << Instruction::DUP3 << Instruction::ADD;
- // stack: ref new_length data_pos new_size delete_end
- m_context << Instruction::SWAP2 << Instruction::ADD;
- // stack: ref new_length delete_end delete_start
- if (_type.isByteArray() || _type.baseType()->storageBytes() < 32)
- clearStorageLoop(IntegerType(256));
- else
- clearStorageLoop(*_type.baseType());
+ // Change of length for a regular array (i.e. length at location, data at KECCAK256(location)).
+ // stack: ref new_length old_length
+ // store new length
+ _context << Instruction::DUP2;
+ if (_type.isByteArray())
+ // For a "long" byte array, store length as 2*length+1
+ _context << Instruction::DUP1 << Instruction::ADD << u256(1) << Instruction::ADD;
+ _context<< Instruction::DUP4 << Instruction::SSTORE;
+ // skip if size is not reduced
+ _context << Instruction::DUP2 << Instruction::DUP2
+ << Instruction::ISZERO << Instruction::GT;
+ _context.appendConditionalJumpTo(resizeEnd);
+
+ // size reduced, clear the end of the array
+ // stack: ref new_length old_length
+ ArrayUtils(_context).convertLengthToSize(_type);
+ _context << Instruction::DUP2;
+ ArrayUtils(_context).convertLengthToSize(_type);
+ // stack: ref new_length old_size new_size
+ // compute data positions
+ _context << Instruction::DUP4;
+ CompilerUtils(_context).computeHashStatic();
+ // stack: ref new_length old_size new_size data_pos
+ _context << Instruction::SWAP2 << Instruction::DUP3 << Instruction::ADD;
+ // stack: ref new_length data_pos new_size delete_end
+ _context << Instruction::SWAP2 << Instruction::ADD;
+ // stack: ref new_length delete_end delete_start
+ if (_type.isByteArray() || _type.baseType()->storageBytes() < 32)
+ ArrayUtils(_context).clearStorageLoop(make_shared<IntegerType>(256));
+ else
+ ArrayUtils(_context).clearStorageLoop(_type.baseType());
- m_context << resizeEnd;
- // cleanup
- m_context << Instruction::POP << Instruction::POP << Instruction::POP;
- solAssert(m_context.stackHeight() == stackHeightStart - 2, "");
+ _context << resizeEnd;
+ // cleanup
+ _context << Instruction::POP << Instruction::POP << Instruction::POP;
+ solAssert(_context.stackHeight() == stackHeightStart - 2, "");
+ }
+ );
}
-void ArrayUtils::clearStorageLoop(Type const& _type) const
+void ArrayUtils::clearStorageLoop(TypePointer const& _type) const
{
- unsigned stackHeightStart = m_context.stackHeight();
- if (_type.category() == Type::Category::Mapping)
- {
- m_context << Instruction::POP;
- return;
- }
- // stack: end_pos pos
-
- // jump to and return from the loop to allow for duplicate code removal
- eth::AssemblyItem returnTag = m_context.pushNewTag();
- m_context << Instruction::SWAP2 << Instruction::SWAP1;
-
- // stack: <return tag> end_pos pos
- eth::AssemblyItem loopStart = m_context.appendJumpToNew();
- m_context << loopStart;
- // check for loop condition
- m_context << Instruction::DUP1 << Instruction::DUP3
- << Instruction::GT << Instruction::ISZERO;
- eth::AssemblyItem zeroLoopEnd = m_context.newTag();
- m_context.appendConditionalJumpTo(zeroLoopEnd);
- // delete
- m_context << u256(0);
- StorageItem(m_context, _type).setToZero(SourceLocation(), false);
- m_context << Instruction::POP;
- // increment
- m_context << _type.storageSize() << Instruction::ADD;
- m_context.appendJumpTo(loopStart);
- // cleanup
- m_context << zeroLoopEnd;
- m_context << Instruction::POP << Instruction::SWAP1;
- // "return"
- m_context << Instruction::JUMP;
-
- m_context << returnTag;
- solAssert(m_context.stackHeight() == stackHeightStart - 1, "");
+ m_context.callLowLevelFunction(
+ "$clearStorageLoop_" + _type->identifier(),
+ 2,
+ 1,
+ [_type](CompilerContext& _context)
+ {
+ unsigned stackHeightStart = _context.stackHeight();
+ if (_type->category() == Type::Category::Mapping)
+ {
+ _context << Instruction::POP;
+ return;
+ }
+ // stack: end_pos pos
+
+ // jump to and return from the loop to allow for duplicate code removal
+ eth::AssemblyItem returnTag = _context.pushNewTag();
+ _context << Instruction::SWAP2 << Instruction::SWAP1;
+
+ // stack: <return tag> end_pos pos
+ eth::AssemblyItem loopStart = _context.appendJumpToNew();
+ _context << loopStart;
+ // check for loop condition
+ _context << Instruction::DUP1 << Instruction::DUP3
+ << Instruction::GT << Instruction::ISZERO;
+ eth::AssemblyItem zeroLoopEnd = _context.newTag();
+ _context.appendConditionalJumpTo(zeroLoopEnd);
+ // delete
+ _context << u256(0);
+ StorageItem(_context, *_type).setToZero(SourceLocation(), false);
+ _context << Instruction::POP;
+ // increment
+ _context << _type->storageSize() << Instruction::ADD;
+ _context.appendJumpTo(loopStart);
+ // cleanup
+ _context << zeroLoopEnd;
+ _context << Instruction::POP << Instruction::SWAP1;
+ // "return"
+ _context << Instruction::JUMP;
+
+ _context << returnTag;
+ solAssert(_context.stackHeight() == stackHeightStart - 1, "");
+ }
+ );
}
void ArrayUtils::convertLengthToSize(ArrayType const& _arrayType, bool _pad) const
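The short/long byte-array branches in copyArrayToStorage and resizeDynamicArray above rely on the storage encoding their comments describe: an array shorter than 32 bytes keeps its data in the slot itself with 2*length in the lowest-order byte, while a longer array stores 2*length+1 in the slot and keeps the data at KECCAK256(slot). A small standalone illustration of the short case (layout only, no hashing or storage access; the helper names are illustrative):

#include <array>
#include <cassert>
#include <cstdint>
#include <string>

// One 32-byte storage slot, big-endian as on the EVM stack.
using Slot = std::array<uint8_t, 32>;

// Short byte array (< 32 bytes): data left-aligned, lowest byte holds 2*len.
Slot encodeShort(std::string const& data)
{
    assert(data.size() < 32);
    Slot slot{};
    for (size_t i = 0; i < data.size(); ++i)
        slot[i] = uint8_t(data[i]);
    slot[31] = uint8_t(2 * data.size()); // even lowest bit marks "short"
    return slot;
}

std::string decodeShort(Slot const& slot)
{
    // An odd lowest byte would mean "long": the slot then holds 2*len+1
    // and the data lives at KECCAK256 of the slot position instead.
    assert((slot[31] & 1) == 0);
    size_t len = slot[31] / 2;
    return std::string(slot.begin(), slot.begin() + len);
}

int main()
{
    Slot s = encodeShort("abc");
    assert(decodeShort(s) == "abc");
    assert(s[31] == 6); // 2 * length
}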
@@ -854,7 +901,7 @@ void ArrayUtils::accessIndex(ArrayType const& _arrayType, bool _doBoundsCheck) c
// check out-of-bounds access
m_context << Instruction::DUP2 << Instruction::LT << Instruction::ISZERO;
// out-of-bounds access throws exception
- m_context.appendConditionalJumpTo(m_context.errorTag());
+ m_context.appendConditionalInvalid();
}
if (location == DataLocation::CallData && _arrayType.isDynamicallySized())
// remove length if present
diff --git a/libsolidity/codegen/ArrayUtils.h b/libsolidity/codegen/ArrayUtils.h
index d0ba2892..806fbea7 100644
--- a/libsolidity/codegen/ArrayUtils.h
+++ b/libsolidity/codegen/ArrayUtils.h
@@ -22,6 +22,8 @@
#pragma once
+#include <memory>
+
namespace dev
{
namespace solidity
@@ -30,6 +32,7 @@ namespace solidity
class CompilerContext;
class Type;
class ArrayType;
+using TypePointer = std::shared_ptr<Type const>;
/**
* Class that provides code generation for handling arrays.
@@ -67,7 +70,7 @@ public:
/// Appends a loop that clears a sequence of storage slots of the given type (excluding end).
/// Stack pre: end_ref start_ref
/// Stack post: end_ref
- void clearStorageLoop(Type const& _type) const;
+ void clearStorageLoop(TypePointer const& _type) const;
/// Converts length to size (number of storage slots or calldata/memory bytes).
/// if @a _pad then add padding to multiples of 32 bytes for calldata/memory.
/// Stack pre: length
diff --git a/libsolidity/codegen/CompilerContext.cpp b/libsolidity/codegen/CompilerContext.cpp
index 2de5a3ec..6875bda1 100644
--- a/libsolidity/codegen/CompilerContext.cpp
+++ b/libsolidity/codegen/CompilerContext.cpp
@@ -21,14 +21,21 @@
*/
#include <libsolidity/codegen/CompilerContext.h>
-#include <utility>
-#include <numeric>
-#include <boost/algorithm/string/replace.hpp>
+#include <libsolidity/codegen/CompilerUtils.h>
#include <libsolidity/ast/AST.h>
#include <libsolidity/codegen/Compiler.h>
#include <libsolidity/interface/Version.h>
-#include <libsolidity/inlineasm/AsmData.h>
-#include <libsolidity/inlineasm/AsmStack.h>
+#include <libsolidity/interface/ErrorReporter.h>
+#include <libsolidity/parsing/Scanner.h>
+#include <libsolidity/inlineasm/AsmParser.h>
+#include <libsolidity/inlineasm/AsmCodeGen.h>
+#include <libsolidity/inlineasm/AsmAnalysis.h>
+#include <libsolidity/inlineasm/AsmAnalysisInfo.h>
+
+#include <boost/algorithm/string/replace.hpp>
+
+#include <utility>
+#include <numeric>
using namespace std;
@@ -57,10 +64,67 @@ void CompilerContext::startFunction(Declaration const& _function)
*this << functionEntryLabel(_function);
}
+void CompilerContext::callLowLevelFunction(
+ string const& _name,
+ unsigned _inArgs,
+ unsigned _outArgs,
+ function<void(CompilerContext&)> const& _generator
+)
+{
+ eth::AssemblyItem retTag = pushNewTag();
+ CompilerUtils(*this).moveIntoStack(_inArgs);
+
+ *this << lowLevelFunctionTag(_name, _inArgs, _outArgs, _generator);
+
+ appendJump(eth::AssemblyItem::JumpType::IntoFunction);
+ adjustStackOffset(int(_outArgs) - 1 - _inArgs);
+ *this << retTag.tag();
+}
+
+eth::AssemblyItem CompilerContext::lowLevelFunctionTag(
+ string const& _name,
+ unsigned _inArgs,
+ unsigned _outArgs,
+ function<void(CompilerContext&)> const& _generator
+)
+{
+ auto it = m_lowLevelFunctions.find(_name);
+ if (it == m_lowLevelFunctions.end())
+ {
+ eth::AssemblyItem tag = newTag().pushTag();
+ m_lowLevelFunctions.insert(make_pair(_name, tag));
+ m_lowLevelFunctionGenerationQueue.push(make_tuple(_name, _inArgs, _outArgs, _generator));
+ return tag;
+ }
+ else
+ return it->second;
+}
+
+void CompilerContext::appendMissingLowLevelFunctions()
+{
+ while (!m_lowLevelFunctionGenerationQueue.empty())
+ {
+ string name;
+ unsigned inArgs;
+ unsigned outArgs;
+ function<void(CompilerContext&)> generator;
+ tie(name, inArgs, outArgs, generator) = m_lowLevelFunctionGenerationQueue.front();
+ m_lowLevelFunctionGenerationQueue.pop();
+
+ setStackOffset(inArgs + 1);
+ *this << m_lowLevelFunctions.at(name).tag();
+ generator(*this);
+ CompilerUtils(*this).moveToStackTop(outArgs);
+ appendJump(eth::AssemblyItem::JumpType::OutOfFunction);
+ solAssert(stackHeight() == outArgs, "Invalid stack height in low-level function " + name + ".");
+ }
+}
+
void CompilerContext::addVariable(VariableDeclaration const& _declaration,
unsigned _offsetToCurrent)
{
solAssert(m_asm->deposit() >= 0 && unsigned(m_asm->deposit()) >= _offsetToCurrent, "");
+ solAssert(m_localVariables.count(&_declaration) == 0, "Variable already present");
m_localVariables[&_declaration] = unsigned(m_asm->deposit()) - _offsetToCurrent;
}
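callLowLevelFunction, lowLevelFunctionTag and appendMissingLowLevelFunctions above implement a simple memoization scheme: the first request for a name records a fresh tag and queues the generator, later requests for the same name reuse the tag, and the queued generators are emitted exactly once at the end. A scaled-down, self-contained model of that scheme, with string labels standing in for assembly tags (the FunctionPool name and printed output are illustrative):

#include <functional>
#include <iostream>
#include <map>
#include <queue>
#include <string>
#include <utility>

struct FunctionPool
{
    // name -> label, analogous to m_lowLevelFunctions (name -> tag)
    std::map<std::string, std::string> labels;
    // pending generators, analogous to m_lowLevelFunctionGenerationQueue
    std::queue<std::pair<std::string, std::function<void()>>> pending;

    std::string labelFor(std::string const& name, std::function<void()> const& generator)
    {
        auto it = labels.find(name);
        if (it != labels.end())
            return it->second;        // already requested: reuse, do not queue again
        std::string label = "tag_" + std::to_string(labels.size());
        labels.emplace(name, label);
        pending.emplace(name, generator);
        return label;
    }

    void emitMissing()                // analogous to appendMissingLowLevelFunctions
    {
        while (!pending.empty())
        {
            auto [name, generator] = pending.front();
            pending.pop();
            std::cout << labels.at(name) << ": ";
            generator();              // a generator may itself call labelFor();
                                      // the loop picks up whatever it adds
        }
    }
};

int main()
{
    FunctionPool pool;
    auto gen = [] { std::cout << "<body of $clearStorageLoop_uint256>\n"; };
    std::string a = pool.labelFor("$clearStorageLoop_uint256", gen);
    std::string b = pool.labelFor("$clearStorageLoop_uint256", gen); // deduplicated
    std::cout << (a == b ? "same label reused\n" : "distinct labels\n");
    pool.emitMissing();               // body emitted exactly once
}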
@@ -167,6 +231,34 @@ CompilerContext& CompilerContext::appendJump(eth::AssemblyItem::JumpType _jumpTy
return *this << item;
}
+CompilerContext& CompilerContext::appendInvalid()
+{
+ return *this << Instruction::INVALID;
+}
+
+CompilerContext& CompilerContext::appendConditionalInvalid()
+{
+ *this << Instruction::ISZERO;
+ eth::AssemblyItem afterTag = appendConditionalJump();
+ *this << Instruction::INVALID;
+ *this << afterTag;
+ return *this;
+}
+
+CompilerContext& CompilerContext::appendRevert()
+{
+ return *this << u256(0) << u256(0) << Instruction::REVERT;
+}
+
+CompilerContext& CompilerContext::appendConditionalRevert()
+{
+ *this << Instruction::ISZERO;
+ eth::AssemblyItem afterTag = appendConditionalJump();
+ appendRevert();
+ *this << afterTag;
+ return *this;
+}
+
void CompilerContext::resetVisitedNodes(ASTNode const* _node)
{
stack<ASTNode const*> newStack;
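Both conditional helpers above apply ISZERO and then jump over the failing instruction, so the failure path (INVALID, or REVERT with empty return data) runs exactly when the value on top of the stack is nonzero; the bounds check in ArrayUtils::accessIndex combines this with LT/ISZERO to abort on out-of-bounds access. A tiny runnable model of that inverted-condition control flow, with plain C++ standing in for the emitted instruction sequence:

#include <cassert>
#include <cstdint>

// Mirrors the emitted sequence: ISZERO; JUMPI(afterTag); REVERT(0, 0); afterTag:
// Returns true when execution would take the failure path.
bool conditionalRevert(uint64_t topOfStack)
{
    uint64_t inverted = topOfStack == 0 ? 1 : 0; // ISZERO
    if (inverted)                                // JUMPI afterTag
        return false;                            // afterTag: keep going
    return true;                                 // fell through into REVERT(0, 0)
}

int main()
{
    assert(conditionalRevert(1));   // nonzero condition -> failure path
    assert(!conditionalRevert(0));  // zero condition -> execution continues
}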
@@ -191,33 +283,56 @@ void CompilerContext::appendInlineAssembly(
assembly = &replacedAssembly;
}
- unsigned startStackHeight = stackHeight();
- auto identifierAccess = [&](
+ int startStackHeight = stackHeight();
+
+ julia::ExternalIdentifierAccess identifierAccess;
+ identifierAccess.resolve = [&](
assembly::Identifier const& _identifier,
- eth::Assembly& _assembly,
- assembly::CodeGenerator::IdentifierContext _context
- ) {
+ julia::IdentifierContext,
+ bool
+ )
+ {
+ auto it = std::find(_localVariables.begin(), _localVariables.end(), _identifier.name);
+ return it == _localVariables.end() ? size_t(-1) : 1;
+ };
+ identifierAccess.generateCode = [&](
+ assembly::Identifier const& _identifier,
+ julia::IdentifierContext _context,
+ julia::AbstractAssembly& _assembly
+ )
+ {
auto it = std::find(_localVariables.begin(), _localVariables.end(), _identifier.name);
- if (it == _localVariables.end())
- return false;
- unsigned stackDepth = _localVariables.end() - it;
- int stackDiff = _assembly.deposit() - startStackHeight + stackDepth;
+ solAssert(it != _localVariables.end(), "");
+ int stackDepth = _localVariables.end() - it;
+ int stackDiff = _assembly.stackHeight() - startStackHeight + stackDepth;
+ if (_context == julia::IdentifierContext::LValue)
+ stackDiff -= 1;
if (stackDiff < 1 || stackDiff > 16)
BOOST_THROW_EXCEPTION(
CompilerError() <<
- errinfo_comment("Stack too deep, try removing local variables.")
+ errinfo_comment("Stack too deep (" + to_string(stackDiff) + "), try removing local variables.")
);
- if (_context == assembly::CodeGenerator::IdentifierContext::RValue)
- _assembly.append(dupInstruction(stackDiff));
+ if (_context == julia::IdentifierContext::RValue)
+ _assembly.appendInstruction(dupInstruction(stackDiff));
else
{
- _assembly.append(swapInstruction(stackDiff));
- _assembly.append(Instruction::POP);
+ _assembly.appendInstruction(swapInstruction(stackDiff));
+ _assembly.appendInstruction(Instruction::POP);
}
- return true;
};
- solAssert(assembly::InlineAssemblyStack().parseAndAssemble(*assembly, *m_asm, identifierAccess), "");
+ ErrorList errors;
+ ErrorReporter errorReporter(errors);
+ auto scanner = make_shared<Scanner>(CharStream(*assembly), "--CODEGEN--");
+ auto parserResult = assembly::Parser(errorReporter).parse(scanner);
+ solAssert(parserResult, "Failed to parse inline assembly block.");
+ solAssert(errorReporter.errors().empty(), "Failed to parse inline assembly block.");
+
+ assembly::AsmAnalysisInfo analysisInfo;
+ assembly::AsmAnalyzer analyzer(analysisInfo, errorReporter, false, identifierAccess.resolve);
+ solAssert(analyzer.analyze(*parserResult), "Failed to analyze inline assembly block.");
+ solAssert(errorReporter.errors().empty(), "Failed to analyze inline assembly block.");
+ assembly::CodeGenerator::assemble(*parserResult, analysisInfo, *m_asm, identifierAccess);
}
FunctionDefinition const& CompilerContext::resolveVirtualFunction(
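The rewritten appendInlineAssembly resolves external identifiers itself: the generateCode callback turns a named local into a DUP (for reads) or SWAP+POP (for assignments) whose depth is the current stack height minus the height at the start of the assembly block plus the variable's distance from the end of the _localVariables list, and it rejects depths outside 1..16. A small standalone model of that depth computation, using plain integers instead of assembly state (illustrative only):

#include <algorithm>
#include <cassert>
#include <stdexcept>
#include <string>
#include <vector>

// Computes the DUP/SWAP depth for accessing a named local inside an inline
// assembly block, following the stackDiff logic in the hunk above.
int accessDepth(
    std::vector<std::string> const& localVariables, // bottom-most first
    std::string const& name,
    int startStackHeight,   // stack height when the assembly block started
    int currentStackHeight, // stack height at the point of the access
    bool lvalue             // assignment (SWAP+POP) instead of read (DUP)
)
{
    auto it = std::find(localVariables.begin(), localVariables.end(), name);
    assert(it != localVariables.end());
    int stackDepth = int(localVariables.end() - it);
    int stackDiff = currentStackHeight - startStackHeight + stackDepth;
    if (lvalue)
        stackDiff -= 1; // the value being assigned sits on top and is not counted
    if (stackDiff < 1 || stackDiff > 16)
        throw std::runtime_error("Stack too deep, try removing local variables.");
    return stackDiff;
}

int main()
{
    // Two named slots below the block, nothing pushed yet:
    // the topmost one ("src") is reachable with DUP1.
    assert(accessDepth({"dst", "src"}, "src", 2, 2, false) == 1);
    assert(accessDepth({"dst", "src"}, "dst", 2, 2, false) == 2);
    // One extra value pushed inside the block moves everything one deeper.
    assert(accessDepth({"dst", "src"}, "src", 2, 3, false) == 2);
}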
diff --git a/libsolidity/codegen/CompilerContext.h b/libsolidity/codegen/CompilerContext.h
index 80671528..1968c1e1 100644
--- a/libsolidity/codegen/CompilerContext.h
+++ b/libsolidity/codegen/CompilerContext.h
@@ -22,17 +22,21 @@
#pragma once
-#include <ostream>
-#include <stack>
-#include <queue>
-#include <utility>
-#include <libevmasm/Instruction.h>
-#include <libevmasm/Assembly.h>
#include <libsolidity/ast/ASTForward.h>
#include <libsolidity/ast/Types.h>
#include <libsolidity/ast/ASTAnnotations.h>
+
+#include <libevmasm/Instruction.h>
+#include <libevmasm/Assembly.h>
+
#include <libdevcore/Common.h>
+#include <ostream>
+#include <stack>
+#include <queue>
+#include <utility>
+#include <functional>
+
namespace dev {
namespace solidity {
@@ -90,6 +94,29 @@ public:
/// as "having code".
void startFunction(Declaration const& _function);
+ /// Appends a call to the named low-level function and inserts the generator into the
+ /// list of low-level-functions to be generated, unless it already exists.
+ /// Note that the generator should not assume that objects are still alive when it is called,
+ /// unless they are guaranteed to be alive for the whole run of the compiler (AST nodes, for example).
+ void callLowLevelFunction(
+ std::string const& _name,
+ unsigned _inArgs,
+ unsigned _outArgs,
+ std::function<void(CompilerContext&)> const& _generator
+ );
+ /// Returns the tag of the named low-level function and inserts the generator into the
+ /// list of low-level-functions to be generated, unless it already exists.
+ /// Note that the generator should not assume that objects are still alive when it is called,
+ /// unless they are guaranteed to be alive for the whole run of the compiler (AST nodes, for example).
+ eth::AssemblyItem lowLevelFunctionTag(
+ std::string const& _name,
+ unsigned _inArgs,
+ unsigned _outArgs,
+ std::function<void(CompilerContext&)> const& _generator
+ );
+ /// Generates the code for missing low-level functions, i.e. calls the generators passed above.
+ void appendMissingLowLevelFunctions();
+
ModifierDefinition const& functionModifier(std::string const& _name) const;
/// Returns the distance of the given local variable from the bottom of the stack (of the current function).
unsigned baseStackOffsetOfVariable(Declaration const& _declaration) const;
@@ -109,9 +136,15 @@ public:
/// Appends a JUMP to a new tag and @returns the tag
eth::AssemblyItem appendJumpToNew() { return m_asm->appendJump().tag(); }
/// Appends a JUMP to a tag already on the stack
- CompilerContext& appendJump(eth::AssemblyItem::JumpType _jumpType = eth::AssemblyItem::JumpType::Ordinary);
- /// Returns an "ErrorTag"
- eth::AssemblyItem errorTag() { return m_asm->errorTag(); }
+ CompilerContext& appendJump(eth::AssemblyItem::JumpType _jumpType = eth::AssemblyItem::JumpType::Ordinary);
+ /// Appends an INVALID instruction
+ CompilerContext& appendInvalid();
+ /// Appends a conditional INVALID instruction
+ CompilerContext& appendConditionalInvalid();
+ /// Appends a REVERT(0, 0) call
+ CompilerContext& appendRevert();
+ /// Appends a conditional REVERT(0, 0) call
+ CompilerContext& appendConditionalRevert();
/// Appends a JUMP to a specific tag
CompilerContext& appendJumpTo(eth::AssemblyItem const& _tag) { m_asm->appendJump(_tag); return *this; }
/// Appends pushing of a new tag and @returns the new tag.
@@ -120,10 +153,10 @@ public:
eth::AssemblyItem newTag() { return m_asm->newTag(); }
/// Adds a subroutine to the code (in the data section) and pushes its size (via a tag)
/// on the stack. @returns the pushsub assembly item.
- eth::AssemblyItem addSubroutine(eth::AssemblyPointer const& _assembly) { auto sub = m_asm->newSub(_assembly); m_asm->append(m_asm->newPushSubSize(size_t(sub.data()))); return sub; }
- void pushSubroutineSize(size_t _subRoutine) { m_asm->append(m_asm->newPushSubSize(_subRoutine)); }
+ eth::AssemblyItem addSubroutine(eth::AssemblyPointer const& _assembly) { return m_asm->appendSubroutine(_assembly); }
+ void pushSubroutineSize(size_t _subRoutine) { m_asm->pushSubroutineSize(_subRoutine); }
/// Pushes the offset of the subroutine.
- void pushSubroutineOffset(size_t _subRoutine) { m_asm->append(eth::AssemblyItem(eth::PushSub, _subRoutine)); }
+ void pushSubroutineOffset(size_t _subRoutine) { m_asm->pushSubroutineOffset(_subRoutine); }
/// Pushes the size of the final program
void appendProgramSize() { m_asm->appendProgramSize(); }
/// Adds data to the data section, pushes a reference to the stack
@@ -248,6 +281,10 @@ private:
CompilerContext *m_runtimeContext;
/// The index of the runtime subroutine.
size_t m_runtimeSub = -1;
+ /// An index of low-level function labels by name.
+ std::map<std::string, eth::AssemblyItem> m_lowLevelFunctions;
+ /// The queue of low-level functions to generate.
+ std::queue<std::tuple<std::string, unsigned, unsigned, std::function<void(CompilerContext&)>>> m_lowLevelFunctionGenerationQueue;
};
}
diff --git a/libsolidity/codegen/CompilerUtils.cpp b/libsolidity/codegen/CompilerUtils.cpp
index d5361ac6..7067ddd5 100644
--- a/libsolidity/codegen/CompilerUtils.cpp
+++ b/libsolidity/codegen/CompilerUtils.cpp
@@ -128,14 +128,14 @@ void CompilerUtils::storeInMemoryDynamic(Type const& _type, bool _padToWordBound
m_context << Instruction::DUP1;
storeStringData(bytesConstRef(str->value()));
if (_padToWordBoundaries)
- m_context << u256(((str->value().size() + 31) / 32) * 32);
+ m_context << u256(max<size_t>(32, ((str->value().size() + 31) / 32) * 32));
else
m_context << u256(str->value().size());
m_context << Instruction::ADD;
}
else if (
_type.category() == Type::Category::Function &&
- dynamic_cast<FunctionType const&>(_type).location() == FunctionType::Location::External
+ dynamic_cast<FunctionType const&>(_type).kind() == FunctionType::Kind::External
)
{
solUnimplementedAssert(_padToWordBoundaries, "Non-padded store for function not implemented.");
@@ -180,6 +180,9 @@ void CompilerUtils::encodeToMemory(
t = t->mobileType()->interfaceType(_encodeAsLibraryTypes)->encodingType();
}
+ if (_givenTypes.empty())
+ return;
+
// Stack during operation:
// <v1> <v2> ... <vn> <mem_start> <dyn_head_1> ... <dyn_head_r> <end_of_mem>
// The values dyn_head_i are added during the first loop and they point to the head part
@@ -200,6 +203,7 @@ void CompilerUtils::encodeToMemory(
// leave end_of_mem as dyn head pointer
m_context << Instruction::DUP1 << u256(32) << Instruction::ADD;
dynPointers++;
+ solAssert((argSize + dynPointers) < 16, "Stack too deep, try using less variables.");
}
else
{
@@ -298,21 +302,49 @@ void CompilerUtils::zeroInitialiseMemoryArray(ArrayType const& _type)
m_context << Instruction::SWAP1 << Instruction::POP;
}
+void CompilerUtils::memoryCopy32()
+{
+ // Stack here: size target source
+
+ m_context.appendInlineAssembly(R"(
+ {
+ for { let i := 0 } lt(i, len) { i := add(i, 32) } {
+ mstore(add(dst, i), mload(add(src, i)))
+ }
+ }
+ )",
+ { "len", "dst", "src" }
+ );
+ m_context << Instruction::POP << Instruction::POP << Instruction::POP;
+}
+
void CompilerUtils::memoryCopy()
{
// Stack here: size target source
- // stack for call: outsize target size source value contract gas
- //@TODO do not use ::CALL if less than 32 bytes?
- m_context << Instruction::DUP3 << Instruction::SWAP1;
- m_context << u256(0) << u256(identityContractAddress);
- // compute gas costs
- m_context << u256(32) << Instruction::DUP5 << u256(31) << Instruction::ADD;
- static unsigned c_identityGas = 15;
- static unsigned c_identityWordGas = 3;
- m_context << Instruction::DIV << u256(c_identityWordGas) << Instruction::MUL;
- m_context << u256(c_identityGas) << Instruction::ADD;
- m_context << Instruction::CALL;
- m_context << Instruction::POP; // ignore return value
+
+ m_context.appendInlineAssembly(R"(
+ {
+ // copy 32 bytes at once
+ for
+ {}
+ iszero(lt(len, 32))
+ {
+ dst := add(dst, 32)
+ src := add(src, 32)
+ len := sub(len, 32)
+ }
+ { mstore(dst, mload(src)) }
+
+ // copy the remainder (0 < len < 32)
+ let mask := sub(exp(256, sub(32, len)), 1)
+ let srcpart := and(mload(src), not(mask))
+ let dstpart := and(mload(dst), mask)
+ mstore(dst, or(srcpart, dstpart))
+ }
+ )",
+ { "len", "dst", "src" }
+ );
+ m_context << Instruction::POP << Instruction::POP << Instruction::POP;
}
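
For reference, the remainder merge in the rewritten memoryCopy above can be read as follows. This is an illustrative host-side sketch only, not part of the patch; u256 stands in for the compiler's 256-bit unsigned integer, which is itself a boost::multiprecision type.

#include <boost/multiprecision/cpp_int.hpp>

using u256 = boost::multiprecision::uint256_t;

// Merge the leading "len" bytes of the source word into the destination word,
// for 0 < len < 32, with words interpreted the way EVM memory words are.
u256 mergeTail(u256 _dstWord, u256 _srcWord, unsigned _len)
{
	u256 mask = (u256(1) << (8 * (32 - _len))) - 1; // low-order 32 - len bytes
	u256 srcpart = _srcWord & ~mask;                // leading len bytes from src
	u256 dstpart = _dstWord & mask;                 // trailing bytes kept at dst
	return srcpart | dstpart;
}

The whole words before the tail are moved by the plain mload/mstore loop; only the tail needs the mask, because mstore always writes a full 32-byte word.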
void CompilerUtils::splitExternalFunctionType(bool _leftAligned)
@@ -321,13 +353,16 @@ void CompilerUtils::splitExternalFunctionType(bool _leftAligned)
// address (right aligned), function identifier (right aligned)
if (_leftAligned)
{
- m_context << Instruction::DUP1 << (u256(1) << (64 + 32)) << Instruction::SWAP1 << Instruction::DIV;
+ m_context << Instruction::DUP1;
+ rightShiftNumberOnStack(64 + 32, false);
// <input> <address>
- m_context << Instruction::SWAP1 << (u256(1) << 64) << Instruction::SWAP1 << Instruction::DIV;
+ m_context << Instruction::SWAP1;
+ rightShiftNumberOnStack(64, false);
}
else
{
- m_context << Instruction::DUP1 << (u256(1) << 32) << Instruction::SWAP1 << Instruction::DIV;
+ m_context << Instruction::DUP1;
+ rightShiftNumberOnStack(32, false);
m_context << ((u256(1) << 160) - 1) << Instruction::AND << Instruction::SWAP1;
}
m_context << u256(0xffffffffUL) << Instruction::AND;
@@ -339,10 +374,10 @@ void CompilerUtils::combineExternalFunctionType(bool _leftAligned)
m_context << u256(0xffffffffUL) << Instruction::AND << Instruction::SWAP1;
if (!_leftAligned)
m_context << ((u256(1) << 160) - 1) << Instruction::AND;
- m_context << (u256(1) << 32) << Instruction::MUL;
+ leftShiftNumberOnStack(32);
m_context << Instruction::OR;
if (_leftAligned)
- m_context << (u256(1) << 64) << Instruction::MUL;
+ leftShiftNumberOnStack(64);
}
void CompilerUtils::pushCombinedFunctionEntryLabel(Declaration const& _function)
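
As a rough illustration (not part of the patch) of the slot layout that splitExternalFunctionType and combineExternalFunctionType above operate on: the 160-bit address and the 32-bit selector of an external function value share one 256-bit word.

#include <boost/multiprecision/cpp_int.hpp>

using u256 = boost::multiprecision::uint256_t;

// Mirrors the instruction sequence of combineExternalFunctionType: the
// right-aligned result is (address << 32) | selector; the left-aligned
// variant is additionally shifted up by 64 bits.
u256 combineExternalFunction(u256 _address, u256 _selector, bool _leftAligned)
{
	_selector &= 0xffffffff;
	if (!_leftAligned)
		_address &= (u256(1) << 160) - 1;
	u256 combined = (_address << 32) | _selector;
	if (_leftAligned)
		combined <<= 64;
	return combined;
}

splitExternalFunctionType undoes this with the corresponding right shifts and masks.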
@@ -351,14 +386,21 @@ void CompilerUtils::pushCombinedFunctionEntryLabel(Declaration const& _function)
// If there is a runtime context, we have to merge both labels into the same
// stack slot in case we store it in storage.
if (CompilerContext* rtc = m_context.runtimeContext())
+ {
+ leftShiftNumberOnStack(32);
m_context <<
- (u256(1) << 32) <<
- Instruction::MUL <<
rtc->functionEntryLabel(_function).toSubAssemblyTag(m_context.runtimeSub()) <<
Instruction::OR;
+ }
}
-void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetType, bool _cleanupNeeded, bool _chopSignBits)
+void CompilerUtils::convertType(
+ Type const& _typeOnStack,
+ Type const& _targetType,
+ bool _cleanupNeeded,
+ bool _chopSignBits,
+ bool _asPartOfArgumentDecoding
+)
{
// For a type extension, we need to remove all higher-order bits that we might have ignored in
// previous operations.
@@ -387,7 +429,7 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
// conversion from bytes to integer. no need to clean the high bit
// only to shift right because of opposite alignment
IntegerType const& targetIntegerType = dynamic_cast<IntegerType const&>(_targetType);
- m_context << (u256(1) << (256 - typeOnStack.numBytes() * 8)) << Instruction::SWAP1 << Instruction::DIV;
+ rightShiftNumberOnStack(256 - typeOnStack.numBytes() * 8, false);
if (targetIntegerType.numBits() < typeOnStack.numBytes() * 8)
convertType(IntegerType(typeOnStack.numBytes() * 8), _targetType, _cleanupNeeded);
}
@@ -416,7 +458,10 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
EnumType const& enumType = dynamic_cast<decltype(enumType)>(_typeOnStack);
solAssert(enumType.numberOfMembers() > 0, "empty enum should have caused a parser error.");
m_context << u256(enumType.numberOfMembers() - 1) << Instruction::DUP2 << Instruction::GT;
- m_context.appendConditionalJumpTo(m_context.errorTag());
+ if (_asPartOfArgumentDecoding)
+ m_context.appendConditionalRevert();
+ else
+ m_context.appendConditionalInvalid();
enumOverflowCheckPending = false;
}
break;
@@ -435,7 +480,7 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
if (auto typeOnStack = dynamic_cast<IntegerType const*>(&_typeOnStack))
if (targetBytesType.numBytes() * 8 > typeOnStack->numBits())
cleanHigherOrderBits(*typeOnStack);
- m_context << (u256(1) << (256 - targetBytesType.numBytes() * 8)) << Instruction::MUL;
+ leftShiftNumberOnStack(256 - targetBytesType.numBytes() * 8);
}
else if (targetTypeCategory == Type::Category::Enum)
{
@@ -445,7 +490,7 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
EnumType const& enumType = dynamic_cast<decltype(enumType)>(_targetType);
solAssert(enumType.numberOfMembers() > 0, "empty enum should have caused a parser error.");
m_context << u256(enumType.numberOfMembers() - 1) << Instruction::DUP2 << Instruction::GT;
- m_context.appendConditionalJumpTo(m_context.errorTag());
+ m_context.appendConditionalInvalid();
enumOverflowCheckPending = false;
}
else if (targetTypeCategory == Type::Category::FixedPoint)
@@ -735,6 +780,20 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
if (_cleanupNeeded)
m_context << Instruction::ISZERO << Instruction::ISZERO;
break;
+ case Type::Category::Function:
+ {
+ if (targetTypeCategory == Type::Category::Integer)
+ {
+ IntegerType const& targetType = dynamic_cast<IntegerType const&>(_targetType);
+ solAssert(targetType.isAddress(), "Function type can only be converted to address.");
+ FunctionType const& typeOnStack = dynamic_cast<FunctionType const&>(_typeOnStack);
+ solAssert(typeOnStack.kind() == FunctionType::Kind::External, "Only external function type can be converted.");
+
+ // stack: <address> <function_id>
+ m_context << Instruction::POP;
+ break;
+ }
+ }
default:
// All other types should not be convertible to non-equal types.
solAssert(_typeOnStack == _targetType, "Invalid type conversion requested.");
@@ -753,9 +812,11 @@ void CompilerUtils::pushZeroValue(Type const& _type)
{
if (auto const* funType = dynamic_cast<FunctionType const*>(&_type))
{
- if (funType->location() == FunctionType::Location::Internal)
+ if (funType->kind() == FunctionType::Kind::Internal)
{
- m_context << m_context.errorTag();
+ m_context << m_context.lowLevelFunctionTag("$invalidFunction", 0, 0, [](CompilerContext& _context) {
+ _context.appendInvalid();
+ });
return;
}
}
@@ -768,37 +829,46 @@ void CompilerUtils::pushZeroValue(Type const& _type)
}
solAssert(referenceType->location() == DataLocation::Memory, "");
- m_context << u256(max(32u, _type.calldataEncodedSize()));
- allocateMemory();
- m_context << Instruction::DUP1;
+ TypePointer type = _type.shared_from_this();
+ m_context.callLowLevelFunction(
+ "$pushZeroValue_" + referenceType->identifier(),
+ 0,
+ 1,
+ [type](CompilerContext& _context) {
+ CompilerUtils utils(_context);
+ _context << u256(max(32u, type->calldataEncodedSize()));
+ utils.allocateMemory();
+ _context << Instruction::DUP1;
- if (auto structType = dynamic_cast<StructType const*>(&_type))
- for (auto const& member: structType->members(nullptr))
- {
- pushZeroValue(*member.type);
- storeInMemoryDynamic(*member.type);
- }
- else if (auto arrayType = dynamic_cast<ArrayType const*>(&_type))
- {
- if (arrayType->isDynamicallySized())
- {
- // zero length
- m_context << u256(0);
- storeInMemoryDynamic(IntegerType(256));
- }
- else if (arrayType->length() > 0)
- {
- m_context << arrayType->length() << Instruction::SWAP1;
- // stack: items_to_do memory_pos
- zeroInitialiseMemoryArray(*arrayType);
- // stack: updated_memory_pos
- }
- }
- else
- solAssert(false, "Requested initialisation for unknown type: " + _type.toString());
+ if (auto structType = dynamic_cast<StructType const*>(type.get()))
+ for (auto const& member: structType->members(nullptr))
+ {
+ utils.pushZeroValue(*member.type);
+ utils.storeInMemoryDynamic(*member.type);
+ }
+ else if (auto arrayType = dynamic_cast<ArrayType const*>(type.get()))
+ {
+ if (arrayType->isDynamicallySized())
+ {
+ // zero length
+ _context << u256(0);
+ utils.storeInMemoryDynamic(IntegerType(256));
+ }
+ else if (arrayType->length() > 0)
+ {
+ _context << arrayType->length() << Instruction::SWAP1;
+ // stack: items_to_do memory_pos
+ utils.zeroInitialiseMemoryArray(*arrayType);
+ // stack: updated_memory_pos
+ }
+ }
+ else
+ solAssert(false, "Requested initialisation for unknown type: " + type->toString());
- // remove the updated memory pointer
- m_context << Instruction::POP;
+ // remove the updated memory pointer
+ _context << Instruction::POP;
+ }
+ );
}
void CompilerUtils::moveToStackVariable(VariableDeclaration const& _variable)
@@ -875,7 +945,7 @@ unsigned CompilerUtils::sizeOnStack(vector<shared_ptr<Type const>> const& _varia
void CompilerUtils::computeHashStatic()
{
storeInMemory(0);
- m_context << u256(32) << u256(0) << Instruction::SHA3;
+ m_context << u256(32) << u256(0) << Instruction::KECCAK256;
}
void CompilerUtils::storeStringData(bytesConstRef _data)
@@ -900,12 +970,12 @@ void CompilerUtils::storeStringData(bytesConstRef _data)
}
}
-unsigned CompilerUtils::loadFromMemoryHelper(Type const& _type, bool _fromCalldata, bool _padToWordBoundaries)
+unsigned CompilerUtils::loadFromMemoryHelper(Type const& _type, bool _fromCalldata, bool _padToWords)
{
- unsigned numBytes = _type.calldataEncodedSize(_padToWordBoundaries);
+ unsigned numBytes = _type.calldataEncodedSize(_padToWords);
bool isExternalFunctionType = false;
if (auto const* funType = dynamic_cast<FunctionType const*>(&_type))
- if (funType->location() == FunctionType::Location::External)
+ if (funType->kind() == FunctionType::Kind::External)
isExternalFunctionType = true;
if (numBytes == 0)
{
@@ -920,11 +990,13 @@ unsigned CompilerUtils::loadFromMemoryHelper(Type const& _type, bool _fromCallda
{
bool leftAligned = _type.category() == Type::Category::FixedBytes;
// add leading or trailing zeros by dividing/multiplying depending on alignment
- u256 shiftFactor = u256(1) << ((32 - numBytes) * 8);
- m_context << shiftFactor << Instruction::SWAP1 << Instruction::DIV;
+ int shiftFactor = (32 - numBytes) * 8;
+ rightShiftNumberOnStack(shiftFactor, false);
if (leftAligned)
- m_context << shiftFactor << Instruction::MUL;
+ leftShiftNumberOnStack(shiftFactor);
}
+ if (_fromCalldata)
+ convertType(_type, _type, true, false, true);
return numBytes;
}
@@ -939,18 +1011,31 @@ void CompilerUtils::cleanHigherOrderBits(IntegerType const& _typeOnStack)
m_context << ((u256(1) << _typeOnStack.numBits()) - 1) << Instruction::AND;
}
-unsigned CompilerUtils::prepareMemoryStore(Type const& _type, bool _padToWordBoundaries) const
+void CompilerUtils::leftShiftNumberOnStack(unsigned _bits)
+{
+	solAssert(_bits < 256, "");
+	m_context << (u256(1) << _bits) << Instruction::MUL;
+}
+
+void CompilerUtils::rightShiftNumberOnStack(unsigned _bits, bool _isSigned)
+{
+	solAssert(_bits < 256, "");
+	m_context << (u256(1) << _bits) << Instruction::SWAP1 << (_isSigned ? Instruction::SDIV : Instruction::DIV);
+}
+
+unsigned CompilerUtils::prepareMemoryStore(Type const& _type, bool _padToWords)
{
- unsigned numBytes = _type.calldataEncodedSize(_padToWordBoundaries);
+ unsigned numBytes = _type.calldataEncodedSize(_padToWords);
bool leftAligned = _type.category() == Type::Category::FixedBytes;
if (numBytes == 0)
m_context << Instruction::POP;
else
{
solAssert(numBytes <= 32, "Memory store of more than 32 bytes requested.");
- if (numBytes != 32 && !leftAligned && !_padToWordBoundaries)
+ convertType(_type, _type, true);
+ if (numBytes != 32 && !leftAligned && !_padToWords)
// shift the value accordingly before storing
- m_context << (u256(1) << ((32 - numBytes) * 8)) << Instruction::MUL;
+ leftShiftNumberOnStack((32 - numBytes) * 8);
}
return numBytes;
}
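
The two shift helpers above centralise a pattern that appears throughout this diff: the EVM targeted here has no shift opcodes, so a left shift by n bits is emitted as a MUL by 2^n and a right shift as a DIV (or SDIV for the signed variant) by 2^n. A minimal host-side sanity check of that equivalence for the unsigned case, illustrative only:

#include <cassert>
#include <cstdint>

int main()
{
	uint64_t x = 0xdeadbeef12345678ULL;
	unsigned bits = 12;
	// leftShiftNumberOnStack: wrapping MUL by 2^bits equals a left shift.
	assert(x * (uint64_t(1) << bits) == (x << bits));
	// rightShiftNumberOnStack(_isSigned = false): DIV by 2^bits equals a logical right shift.
	assert(x / (uint64_t(1) << bits) == (x >> bits));
	return 0;
}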
diff --git a/libsolidity/codegen/CompilerUtils.h b/libsolidity/codegen/CompilerUtils.h
index 4baf48ff..fb169463 100644
--- a/libsolidity/codegen/CompilerUtils.h
+++ b/libsolidity/codegen/CompilerUtils.h
@@ -52,13 +52,13 @@ public:
/// @param _offset offset in memory (or calldata)
/// @param _type data type to load
/// @param _fromCalldata if true, load from calldata, not from memory
- /// @param _padToWordBoundaries if true, assume the data is padded to word (32 byte) boundaries
+ /// @param _padToWords if true, assume the data is padded to full words (32 bytes)
/// @returns the number of bytes consumed in memory.
unsigned loadFromMemory(
unsigned _offset,
Type const& _type = IntegerType(256),
bool _fromCalldata = false,
- bool _padToWordBoundaries = false
+ bool _padToWords = false
);
/// Dynamic version of @see loadFromMemory, expects the memory offset on the stack.
/// Stack pre: memory_offset
@@ -66,7 +66,7 @@ public:
void loadFromMemoryDynamic(
Type const& _type,
bool _fromCalldata = false,
- bool _padToWordBoundaries = true,
+ bool _padToWords = true,
bool _keepUpdatedMemoryOffset = true
);
/// Stores a 256 bit integer from stack in memory.
@@ -76,11 +76,11 @@ public:
/// Dynamic version of @see storeInMemory, expects the memory offset below the value on the stack
/// and also updates that. For reference types, only copies the data pointer. Fails for
/// non-memory-references.
- /// @param _padToWordBoundaries if true, adds zeros to pad to multiple of 32 bytes. Array elements
+ /// @param _padToWords if true, adds zeros to pad to a multiple of 32 bytes. Array elements
/// are always padded (except for byte arrays), regardless of this parameter.
/// Stack pre: memory_offset value...
/// Stack post: (memory_offset+length)
- void storeInMemoryDynamic(Type const& _type, bool _padToWordBoundaries = true);
+ void storeInMemoryDynamic(Type const& _type, bool _padToWords = true);
/// Copies values (of types @a _givenTypes) given on the stack to a location in memory given
/// at the stack top, encoding them according to the ABI as the given types @a _targetTypes.
@@ -88,7 +88,7 @@ public:
/// Stack pre: <v1> <v2> ... <vn> <memptr>
/// Stack post: <memptr_updated>
/// Does not touch the memory-free pointer.
- /// @param _padToWordBoundaries if false, all values are concatenated without padding.
+ /// @param _padToWords if false, all values are concatenated without padding.
/// @param _copyDynamicDataInPlace if true, dynamic types are stored (without length)
/// together with fixed-length data.
/// @param _encodeAsLibraryTypes if true, encodes for a library function, e.g. does not
@@ -98,7 +98,7 @@ public:
void encodeToMemory(
TypePointers const& _givenTypes = {},
TypePointers const& _targetTypes = {},
- bool _padToWordBoundaries = true,
+ bool _padToWords = true,
bool _copyDynamicDataInPlace = false,
bool _encodeAsLibraryTypes = false
);
@@ -109,7 +109,13 @@ public:
/// Stack post: <updated_memptr>
void zeroInitialiseMemoryArray(ArrayType const& _type);
- /// Uses a CALL to the identity contract to perform a memory-to-memory copy.
+ /// Copies full 32-byte words in memory (regions cannot overlap), i.e. it may copy more than the given length.
+ /// The length can be zero, in which case nothing is copied.
+ /// Stack pre: <size> <target> <source>
+ /// Stack post:
+ void memoryCopy32();
+ /// Copies data in memory (regions cannot overlap).
+ /// The length can be zero, in which case nothing is copied.
/// Stack pre: <size> <target> <source>
/// Stack post:
void memoryCopy();
@@ -131,7 +137,15 @@ public:
/// If @a _cleanupNeeded, high order bits cleanup is also done if no type conversion would be
/// necessary.
/// If @a _chopSignBits, the function clears the sign bits that lie outside the width of the signed integer.
- void convertType(Type const& _typeOnStack, Type const& _targetType, bool _cleanupNeeded = false, bool _chopSignBits = false);
+ /// If @a _asPartOfArgumentDecoding is true, failed conversions are flagged via REVERT,
+ /// otherwise they are flagged with INVALID.
+ void convertType(
+ Type const& _typeOnStack,
+ Type const& _targetType,
+ bool _cleanupNeeded = false,
+ bool _chopSignBits = false,
+ bool _asPartOfArgumentDecoding = false
+ );
/// Creates a zero-value for the given type and puts it onto the stack. This might allocate
/// memory for memory references.
@@ -162,7 +176,13 @@ public:
static unsigned sizeOnStack(std::vector<T> const& _variables);
static unsigned sizeOnStack(std::vector<std::shared_ptr<Type const>> const& _variableTypes);
- /// Appends code that computes tha SHA3 hash of the topmost stack element of 32 byte type.
+ /// Helper function to shift the top value on the stack to the left.
+ void leftShiftNumberOnStack(unsigned _bits);
+
+ /// Helper function to shift the top value on the stack to the right.
+ void rightShiftNumberOnStack(unsigned _bits, bool _isSigned = false);
+
+ /// Appends code that computes the Keccak-256 hash of the topmost stack element of a 32-byte type.
void computeHashStatic();
/// Bytes we need to skip to get to the start of call data.
@@ -185,9 +205,9 @@ private:
void cleanHigherOrderBits(IntegerType const& _typeOnStack);
/// Prepares the given type for storing in memory by shifting it if necessary.
- unsigned prepareMemoryStore(Type const& _type, bool _padToWordBoundaries) const;
+ unsigned prepareMemoryStore(Type const& _type, bool _padToWords);
/// Loads type from memory assuming memory offset is on stack top.
- unsigned loadFromMemoryHelper(Type const& _type, bool _fromCalldata, bool _padToWordBoundaries);
+ unsigned loadFromMemoryHelper(Type const& _type, bool _fromCalldata, bool _padToWords);
CompilerContext& m_context;
};
diff --git a/libsolidity/codegen/ContractCompiler.cpp b/libsolidity/codegen/ContractCompiler.cpp
index a0f196bc..cad388df 100644
--- a/libsolidity/codegen/ContractCompiler.cpp
+++ b/libsolidity/codegen/ContractCompiler.cpp
@@ -21,15 +21,20 @@
*/
#include <libsolidity/codegen/ContractCompiler.h>
-#include <algorithm>
-#include <boost/range/adaptor/reversed.hpp>
-#include <libevmasm/Instruction.h>
-#include <libevmasm/Assembly.h>
-#include <libevmasm/GasMeter.h>
#include <libsolidity/inlineasm/AsmCodeGen.h>
#include <libsolidity/ast/AST.h>
+#include <libsolidity/interface/ErrorReporter.h>
#include <libsolidity/codegen/ExpressionCompiler.h>
#include <libsolidity/codegen/CompilerUtils.h>
+
+#include <libevmasm/Instruction.h>
+#include <libevmasm/Assembly.h>
+#include <libevmasm/GasMeter.h>
+
+#include <boost/range/adaptor/reversed.hpp>
+
+#include <algorithm>
+
using namespace std;
using namespace dev;
using namespace dev::solidity;
@@ -42,7 +47,7 @@ class StackHeightChecker
public:
StackHeightChecker(CompilerContext const& _context):
m_context(_context), stackHeight(m_context.stackHeight()) {}
- void check() { solAssert(m_context.stackHeight() == stackHeight, "I sense a disturbance in the stack."); }
+ void check() { solAssert(m_context.stackHeight() == stackHeight, std::string("I sense a disturbance in the stack: ") + std::to_string(m_context.stackHeight()) + " vs " + std::to_string(stackHeight)); }
private:
CompilerContext const& m_context;
unsigned stackHeight;
@@ -106,7 +111,7 @@ void ContractCompiler::appendCallValueCheck()
{
// Throw if function is not payable but call contained ether.
m_context << Instruction::CALLVALUE;
- m_context.appendConditionalJumpTo(m_context.errorTag());
+ m_context.appendConditionalRevert();
}
void ContractCompiler::appendInitAndConstructorCode(ContractDefinition const& _contract)
@@ -262,16 +267,22 @@ void ContractCompiler::appendFunctionSelector(ContractDefinition const& _contrac
m_context << notFound;
if (fallback)
{
+ m_context.setStackOffset(0);
if (!fallback->isPayable())
appendCallValueCheck();
+ // Return tag is used to jump out of the function.
eth::AssemblyItem returnTag = m_context.pushNewTag();
fallback->accept(*this);
m_context << returnTag;
- appendReturnValuePacker(FunctionType(*fallback).returnParameterTypes(), _contract.isLibrary());
+ solAssert(FunctionType(*fallback).parameterTypes().empty(), "");
+ solAssert(FunctionType(*fallback).returnParameterTypes().empty(), "");
+ // Return tag gets consumed.
+ m_context.adjustStackOffset(-1);
+ m_context << Instruction::STOP;
}
else
- m_context.appendJumpTo(m_context.errorTag());
+ m_context.appendRevert();
for (auto const& it: interfaceFunctions)
{
@@ -280,16 +291,29 @@ void ContractCompiler::appendFunctionSelector(ContractDefinition const& _contrac
CompilerContext::LocationSetter locationSetter(m_context, functionType->declaration());
m_context << callDataUnpackerEntryPoints.at(it.first);
+ m_context.setStackOffset(0);
// We have to allow this for libraries, because the value of the previous
// call is still visible in the delegatecall.
if (!functionType->isPayable() && !_contract.isLibrary())
appendCallValueCheck();
+ // Return tag is used to jump out of the function.
eth::AssemblyItem returnTag = m_context.pushNewTag();
- m_context << CompilerUtils::dataStartOffset;
- appendCalldataUnpacker(functionType->parameterTypes());
+ if (!functionType->parameterTypes().empty())
+ {
+ // Parameter for calldataUnpacker
+ m_context << CompilerUtils::dataStartOffset;
+ appendCalldataUnpacker(functionType->parameterTypes());
+ }
m_context.appendJumpTo(m_context.functionEntryLabel(functionType->declaration()));
m_context << returnTag;
+ // Return tag and input parameters get consumed.
+ m_context.adjustStackOffset(
+ CompilerUtils(m_context).sizeOnStack(functionType->returnParameterTypes()) -
+ CompilerUtils(m_context).sizeOnStack(functionType->parameterTypes()) -
+ 1
+ );
+ // Consumes the return parameters.
appendReturnValuePacker(functionType->returnParameterTypes(), _contract.isLibrary());
}
}
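
The adjustStackOffset calls added above keep the context's stack tracking consistent across the jump into the function body: the return tag and the decoded parameters are consumed and the return values are produced. A trivial illustration of the delta (names are illustrative, not compiler code):

// Net change of the tracked stack height once the called function returns.
int selectorStackDelta(unsigned _parameterSlots, unsigned _returnSlots)
{
	return int(_returnSlots) - int(_parameterSlots) - 1; // -1 for the return tag
}

For the parameterless fallback function this is simply -1, which is why that branch adjusts the offset by -1 before emitting STOP.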
@@ -363,7 +387,7 @@ void ContractCompiler::appendCalldataUnpacker(TypePointers const& _typeParameter
// copy to memory
// move calldata type up again
CompilerUtils(m_context).moveIntoStack(calldataType->sizeOnStack());
- CompilerUtils(m_context).convertType(*calldataType, arrayType);
+ CompilerUtils(m_context).convertType(*calldataType, arrayType, false, false, true);
// fetch next pointer again
CompilerUtils(m_context).moveToStackTop(arrayType.sizeOnStack());
}
@@ -486,7 +510,12 @@ bool ContractCompiler::visit(FunctionDefinition const& _function)
stackLayout.push_back(i);
stackLayout += vector<int>(c_localVariablesSize, -1);
- solAssert(stackLayout.size() <= 17, "Stack too deep, try removing local variables.");
+ if (stackLayout.size() > 17)
+ BOOST_THROW_EXCEPTION(
+ CompilerError() <<
+ errinfo_sourceLocation(_function.location()) <<
+ errinfo_comment("Stack too deep, try removing local variables.")
+ );
while (stackLayout.back() != int(stackLayout.size() - 1))
if (stackLayout.back() < 0)
{
@@ -514,94 +543,133 @@ bool ContractCompiler::visit(FunctionDefinition const& _function)
bool ContractCompiler::visit(InlineAssembly const& _inlineAssembly)
{
- ErrorList errors;
- assembly::CodeGenerator codeGen(_inlineAssembly.operations(), errors);
unsigned startStackHeight = m_context.stackHeight();
- codeGen.assemble(
- m_context.nonConstAssembly(),
- [&](assembly::Identifier const& _identifier, eth::Assembly& _assembly, assembly::CodeGenerator::IdentifierContext _context) {
- auto ref = _inlineAssembly.annotation().externalReferences.find(&_identifier);
- if (ref == _inlineAssembly.annotation().externalReferences.end())
- return false;
- Declaration const* decl = ref->second;
- solAssert(!!decl, "");
- if (_context == assembly::CodeGenerator::IdentifierContext::RValue)
+ julia::ExternalIdentifierAccess identifierAccess;
+ identifierAccess.resolve = [&](assembly::Identifier const& _identifier, julia::IdentifierContext, bool)
+ {
+ auto ref = _inlineAssembly.annotation().externalReferences.find(&_identifier);
+ if (ref == _inlineAssembly.annotation().externalReferences.end())
+ return size_t(-1);
+ return ref->second.valueSize;
+ };
+ identifierAccess.generateCode = [&](assembly::Identifier const& _identifier, julia::IdentifierContext _context, julia::AbstractAssembly& _assembly)
+ {
+ auto ref = _inlineAssembly.annotation().externalReferences.find(&_identifier);
+ solAssert(ref != _inlineAssembly.annotation().externalReferences.end(), "");
+ Declaration const* decl = ref->second.declaration;
+ solAssert(!!decl, "");
+ if (_context == julia::IdentifierContext::RValue)
+ {
+ int const depositBefore = _assembly.stackHeight();
+ solAssert(!!decl->type(), "Type of declaration required but not yet determined.");
+ if (FunctionDefinition const* functionDef = dynamic_cast<FunctionDefinition const*>(decl))
{
- solAssert(!!decl->type(), "Type of declaration required but not yet determined.");
- if (FunctionDefinition const* functionDef = dynamic_cast<FunctionDefinition const*>(decl))
+ solAssert(!ref->second.isOffset && !ref->second.isSlot, "");
+ functionDef = &m_context.resolveVirtualFunction(*functionDef);
+ auto functionEntryLabel = m_context.functionEntryLabel(*functionDef).pushTag();
+ solAssert(functionEntryLabel.data() <= std::numeric_limits<size_t>::max(), "");
+ _assembly.appendLabelReference(size_t(functionEntryLabel.data()));
+ // If there is a runtime context, we have to merge both labels into the same
+ // stack slot in case we store it in storage.
+ if (CompilerContext* rtc = m_context.runtimeContext())
{
- functionDef = &m_context.resolveVirtualFunction(*functionDef);
- _assembly.append(m_context.functionEntryLabel(*functionDef).pushTag());
- // If there is a runtime context, we have to merge both labels into the same
- // stack slot in case we store it in storage.
- if (CompilerContext* rtc = m_context.runtimeContext())
- {
- _assembly.append(u256(1) << 32);
- _assembly.append(Instruction::MUL);
- _assembly.append(rtc->functionEntryLabel(*functionDef).toSubAssemblyTag(m_context.runtimeSub()));
- _assembly.append(Instruction::OR);
- }
+ _assembly.appendConstant(u256(1) << 32);
+ _assembly.appendInstruction(Instruction::MUL);
+ auto runtimeEntryLabel = rtc->functionEntryLabel(*functionDef).toSubAssemblyTag(m_context.runtimeSub());
+ solAssert(runtimeEntryLabel.data() <= std::numeric_limits<size_t>::max(), "");
+ _assembly.appendLabelReference(size_t(runtimeEntryLabel.data()));
+ _assembly.appendInstruction(Instruction::OR);
}
- else if (auto variable = dynamic_cast<VariableDeclaration const*>(decl))
+ }
+ else if (auto variable = dynamic_cast<VariableDeclaration const*>(decl))
+ {
+ solAssert(!variable->isConstant(), "");
+ if (m_context.isStateVariable(decl))
{
- solAssert(!variable->isConstant(), "");
- if (m_context.isLocalVariable(variable))
- {
- int stackDiff = _assembly.deposit() - m_context.baseStackOffsetOfVariable(*variable);
- if (stackDiff < 1 || stackDiff > 16)
- BOOST_THROW_EXCEPTION(
- CompilerError() <<
- errinfo_comment("Stack too deep, try removing local variables.")
- );
- for (unsigned i = 0; i < variable->type()->sizeOnStack(); ++i)
- _assembly.append(dupInstruction(stackDiff));
- }
+ auto const& location = m_context.storageLocationOfVariable(*decl);
+ if (ref->second.isSlot)
+ m_context << location.first;
+ else if (ref->second.isOffset)
+ m_context << u256(location.second);
else
+ solAssert(false, "");
+ }
+ else if (m_context.isLocalVariable(decl))
+ {
+ int stackDiff = _assembly.stackHeight() - m_context.baseStackOffsetOfVariable(*variable);
+ if (ref->second.isSlot || ref->second.isOffset)
{
- solAssert(m_context.isStateVariable(variable), "Invalid variable type.");
- auto const& location = m_context.storageLocationOfVariable(*variable);
- if (!variable->type()->isValueType())
+ solAssert(variable->type()->dataStoredIn(DataLocation::Storage), "");
+ unsigned size = variable->type()->sizeOnStack();
+ if (size == 2)
{
- solAssert(location.second == 0, "Intra-slot offest assumed to be zero.");
- _assembly.append(location.first);
+ // slot plus offset
+ if (ref->second.isOffset)
+ stackDiff--;
}
else
{
- _assembly.append(location.first);
- _assembly.append(u256(location.second));
+ solAssert(size == 1, "");
+ // only slot, offset is zero
+ if (ref->second.isOffset)
+ {
+ _assembly.appendConstant(u256(0));
+ return;
+ }
}
}
- }
- else if (auto contract = dynamic_cast<ContractDefinition const*>(decl))
- {
- solAssert(contract->isLibrary(), "");
- _assembly.appendLibraryAddress(contract->name());
+ else
+ solAssert(variable->type()->sizeOnStack() == 1, "");
+ if (stackDiff < 1 || stackDiff > 16)
+ BOOST_THROW_EXCEPTION(
+ CompilerError() <<
+ errinfo_sourceLocation(_inlineAssembly.location()) <<
+ errinfo_comment("Stack too deep, try removing local variables.")
+ );
+ solAssert(variable->type()->sizeOnStack() == 1, "");
+ _assembly.appendInstruction(dupInstruction(stackDiff));
}
else
- solAssert(false, "Invalid declaration type.");
- } else {
- // lvalue context
- auto variable = dynamic_cast<VariableDeclaration const*>(decl);
- solAssert(
- !!variable && m_context.isLocalVariable(variable),
- "Can only assign to stack variables in inline assembly."
- );
- unsigned size = variable->type()->sizeOnStack();
- int stackDiff = _assembly.deposit() - m_context.baseStackOffsetOfVariable(*variable) - size;
- if (stackDiff > 16 || stackDiff < 1)
- BOOST_THROW_EXCEPTION(
- CompilerError() <<
- errinfo_comment("Stack too deep, try removing local variables.")
- );
- for (unsigned i = 0; i < size; ++i) {
- _assembly.append(swapInstruction(stackDiff));
- _assembly.append(Instruction::POP);
- }
+ solAssert(false, "");
}
- return true;
+ else if (auto contract = dynamic_cast<ContractDefinition const*>(decl))
+ {
+ solAssert(!ref->second.isOffset && !ref->second.isSlot, "");
+ solAssert(contract->isLibrary(), "");
+ _assembly.appendLinkerSymbol(contract->fullyQualifiedName());
+ }
+ else
+ solAssert(false, "Invalid declaration type.");
+ solAssert(_assembly.stackHeight() - depositBefore == int(ref->second.valueSize), "");
+ }
+ else
+ {
+ // lvalue context
+ solAssert(!ref->second.isOffset && !ref->second.isSlot, "");
+ auto variable = dynamic_cast<VariableDeclaration const*>(decl);
+ solAssert(
+ !!variable && m_context.isLocalVariable(variable),
+ "Can only assign to stack variables in inline assembly."
+ );
+ solAssert(variable->type()->sizeOnStack() == 1, "");
+ int stackDiff = _assembly.stackHeight() - m_context.baseStackOffsetOfVariable(*variable) - 1;
+ if (stackDiff > 16 || stackDiff < 1)
+ BOOST_THROW_EXCEPTION(
+ CompilerError() <<
+ errinfo_sourceLocation(_inlineAssembly.location()) <<
+ errinfo_comment("Stack too deep(" + to_string(stackDiff) + "), try removing local variables.")
+ );
+ _assembly.appendInstruction(swapInstruction(stackDiff));
+ _assembly.appendInstruction(Instruction::POP);
}
+ };
+ solAssert(_inlineAssembly.annotation().analysisInfo, "");
+ assembly::CodeGenerator::assemble(
+ _inlineAssembly.operations(),
+ *_inlineAssembly.annotation().analysisInfo,
+ m_context.nonConstAssembly(),
+ identifierAccess
);
- solAssert(Error::containsOnlyWarnings(errors), "Code generation for inline assembly with errors requested.");
m_context.setStackOffset(startStackHeight);
return false;
}
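
The stack-depth checks in the rewritten inline assembly access code above all come down to the same reachability rule. A small sketch of that arithmetic, illustrative only and not compiler code:

#include <stdexcept>

// A local variable at base stack offset B is read from stack height H with
// DUP(H - B) and written with SWAP(H - B - 1); the EVM only provides
// DUP1..DUP16 and SWAP1..SWAP16, hence the "stack too deep" errors.
unsigned dupDistance(int _stackHeight, int _baseOffset)
{
	int diff = _stackHeight - _baseOffset;
	if (diff < 1 || diff > 16)
		throw std::runtime_error("Stack too deep, try removing local variables.");
	return unsigned(diff);
}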
@@ -755,7 +823,8 @@ bool ContractCompiler::visit(Return const& _return)
bool ContractCompiler::visit(Throw const& _throw)
{
CompilerContext::LocationSetter locationSetter(m_context, _throw);
- m_context.appendJumpTo(m_context.errorTag());
+ // Do not send back an error detail.
+ m_context.appendRevert();
return false;
}
@@ -820,6 +889,7 @@ void ContractCompiler::appendMissingFunctions()
function->accept(*this);
solAssert(m_context.nextFunctionToCompile() != function, "Compiled the wrong function?");
}
+ m_context.appendMissingLowLevelFunctions();
}
void ContractCompiler::appendModifierOrFunctionCode()
@@ -827,6 +897,7 @@ void ContractCompiler::appendModifierOrFunctionCode()
solAssert(m_currentFunction, "");
unsigned stackSurplus = 0;
Block const* codeBlock = nullptr;
+ vector<VariableDeclaration const*> addedVariables;
m_modifierDepth++;
@@ -850,13 +921,17 @@ void ContractCompiler::appendModifierOrFunctionCode()
for (unsigned i = 0; i < modifier.parameters().size(); ++i)
{
m_context.addVariable(*modifier.parameters()[i]);
+ addedVariables.push_back(modifier.parameters()[i].get());
compileExpression(
*modifierInvocation->arguments()[i],
modifier.parameters()[i]->annotation().type
);
}
for (VariableDeclaration const* localVariable: modifier.localVariables())
+ {
+ addedVariables.push_back(localVariable);
appendStackVariableInitialisation(*localVariable);
+ }
stackSurplus =
CompilerUtils::sizeOnStack(modifier.parameters()) +
@@ -876,6 +951,8 @@ void ContractCompiler::appendModifierOrFunctionCode()
m_returnTags.pop_back();
CompilerUtils(m_context).popStackSlots(stackSurplus);
+ for (auto var: addedVariables)
+ m_context.removeVariable(*var);
}
m_modifierDepth--;
}
@@ -910,7 +987,9 @@ eth::AssemblyPointer ContractCompiler::cloneRuntime()
a << Instruction::DELEGATECALL;
//Propagate error condition (if DELEGATECALL pushes 0 on stack).
a << Instruction::ISZERO;
- a.appendJumpI(a.errorTag());
+ a << Instruction::ISZERO;
+ eth::AssemblyItem afterTag = a.appendJumpI().tag();
+ a << Instruction::INVALID << afterTag;
//@todo adjust for larger return values, make this dynamic.
a << u256(0x20) << u256(0) << Instruction::RETURN;
return make_shared<eth::Assembly>(a);
diff --git a/libsolidity/codegen/ExpressionCompiler.cpp b/libsolidity/codegen/ExpressionCompiler.cpp
index 5748d818..82518e8c 100644
--- a/libsolidity/codegen/ExpressionCompiler.cpp
+++ b/libsolidity/codegen/ExpressionCompiler.cpp
@@ -32,6 +32,7 @@
#include <libsolidity/codegen/CompilerUtils.h>
#include <libsolidity/codegen/LValue.h>
#include <libevmasm/GasMeter.h>
+
using namespace std;
namespace dev
@@ -87,6 +88,7 @@ void ExpressionCompiler::appendStateVariableAccessor(VariableDeclaration const&
FunctionType accessorType(_varDecl);
TypePointers paramTypes = accessorType.parameterTypes();
+ m_context.adjustStackOffset(1 + CompilerUtils::sizeOnStack(paramTypes));
// retrieve the position of the variable
auto const& location = m_context.storageLocationOfVariable(_varDecl);
@@ -110,7 +112,7 @@ void ExpressionCompiler::appendStateVariableAccessor(VariableDeclaration const&
// move key to memory.
utils().copyToStackTop(paramTypes.size() - i, 1);
utils().storeInMemory(0);
- m_context << u256(64) << u256(0) << Instruction::SHA3;
+ m_context << u256(64) << u256(0) << Instruction::KECCAK256;
// push offset
m_context << u256(0);
returnType = mappingType->valueType();
@@ -197,21 +199,40 @@ bool ExpressionCompiler::visit(Conditional const& _condition)
bool ExpressionCompiler::visit(Assignment const& _assignment)
{
CompilerContext::LocationSetter locationSetter(m_context, _assignment);
+ Token::Value op = _assignment.assignmentOperator();
+ Token::Value binOp = op == Token::Assign ? op : Token::AssignmentToBinaryOp(op);
+ Type const& leftType = *_assignment.leftHandSide().annotation().type;
+ if (leftType.category() == Type::Category::Tuple)
+ {
+ solAssert(*_assignment.annotation().type == TupleType(), "");
+ solAssert(op == Token::Assign, "");
+ }
+ else
+ solAssert(*_assignment.annotation().type == leftType, "");
+ bool cleanupNeeded = false;
+ if (op != Token::Assign)
+ cleanupNeeded = cleanupNeededForOp(leftType.category(), binOp);
_assignment.rightHandSide().accept(*this);
// Perform some conversion already. This will convert storage types to memory and literals
// to their actual type, but will not convert e.g. memory to storage.
- TypePointer type = _assignment.rightHandSide().annotation().type->closestTemporaryType(
- _assignment.leftHandSide().annotation().type
- );
- utils().convertType(*_assignment.rightHandSide().annotation().type, *type);
+ TypePointer rightIntermediateType;
+ if (op != Token::Assign && Token::isShiftOp(binOp))
+ rightIntermediateType = _assignment.rightHandSide().annotation().type->mobileType();
+ else
+ rightIntermediateType = _assignment.rightHandSide().annotation().type->closestTemporaryType(
+ _assignment.leftHandSide().annotation().type
+ );
+ solAssert(rightIntermediateType, "");
+ utils().convertType(*_assignment.rightHandSide().annotation().type, *rightIntermediateType, cleanupNeeded);
_assignment.leftHandSide().accept(*this);
solAssert(!!m_currentLValue, "LValue not retrieved.");
- Token::Value op = _assignment.assignmentOperator();
- if (op != Token::Assign) // compound assignment
+ if (op == Token::Assign)
+ m_currentLValue->storeValue(*rightIntermediateType, _assignment.location());
+ else // compound assignment
{
- solUnimplementedAssert(_assignment.annotation().type->isValueType(), "Compound operators not implemented for non-value types.");
+ solAssert(leftType.isValueType(), "Compound operators only available for value types.");
unsigned lvalueSize = m_currentLValue->sizeOnStack();
unsigned itemSize = _assignment.annotation().type->sizeOnStack();
if (lvalueSize > 0)
@@ -221,16 +242,29 @@ bool ExpressionCompiler::visit(Assignment const& _assignment)
// value lvalue_ref value lvalue_ref
}
m_currentLValue->retrieveValue(_assignment.location(), true);
- appendOrdinaryBinaryOperatorCode(Token::AssignmentToBinaryOp(op), *_assignment.annotation().type);
+ utils().convertType(leftType, leftType, cleanupNeeded);
+
+ if (Token::isShiftOp(binOp))
+ appendShiftOperatorCode(binOp, leftType, *rightIntermediateType);
+ else
+ {
+ solAssert(leftType == *rightIntermediateType, "");
+ appendOrdinaryBinaryOperatorCode(binOp, leftType);
+ }
if (lvalueSize > 0)
{
- solAssert(itemSize + lvalueSize <= 16, "Stack too deep, try removing local variables.");
+ if (itemSize + lvalueSize > 16)
+ BOOST_THROW_EXCEPTION(
+ CompilerError() <<
+ errinfo_sourceLocation(_assignment.location()) <<
+ errinfo_comment("Stack too deep, try removing local variables.")
+ );
// value [lvalue_ref] updated_value
for (unsigned i = 0; i < itemSize; ++i)
m_context << swapInstruction(itemSize + lvalueSize) << Instruction::POP;
}
+ m_currentLValue->storeValue(*_assignment.annotation().type, _assignment.location());
}
- m_currentLValue->storeValue(*type, _assignment.location());
m_currentLValue.reset();
return false;
}
@@ -351,20 +385,20 @@ bool ExpressionCompiler::visit(BinaryOperation const& _binaryOperation)
Expression const& leftExpression = _binaryOperation.leftExpression();
Expression const& rightExpression = _binaryOperation.rightExpression();
solAssert(!!_binaryOperation.annotation().commonType, "");
- Type const& commonType = *_binaryOperation.annotation().commonType;
+ TypePointer const& commonType = _binaryOperation.annotation().commonType;
Token::Value const c_op = _binaryOperation.getOperator();
if (c_op == Token::And || c_op == Token::Or) // special case: short-circuiting
appendAndOrOperatorCode(_binaryOperation);
- else if (commonType.category() == Type::Category::RationalNumber)
- m_context << commonType.literalValue(nullptr);
+ else if (commonType->category() == Type::Category::RationalNumber)
+ m_context << commonType->literalValue(nullptr);
else
{
- bool cleanupNeeded = false;
- if (Token::isCompareOp(c_op))
- cleanupNeeded = true;
- if (commonType.category() == Type::Category::Integer && (c_op == Token::Div || c_op == Token::Mod))
- cleanupNeeded = true;
+ bool cleanupNeeded = cleanupNeededForOp(commonType->category(), c_op);
+
+ TypePointer leftTargetType = commonType;
+ TypePointer rightTargetType = Token::isShiftOp(c_op) ? rightExpression.annotation().type->mobileType() : commonType;
+ solAssert(rightTargetType, "");
// for commutative operators, push the literal as late as possible to allow improved optimization
auto isLiteral = [](Expression const& _e)
@@ -375,21 +409,24 @@ bool ExpressionCompiler::visit(BinaryOperation const& _binaryOperation)
if (swap)
{
leftExpression.accept(*this);
- utils().convertType(*leftExpression.annotation().type, commonType, cleanupNeeded);
+ utils().convertType(*leftExpression.annotation().type, *leftTargetType, cleanupNeeded);
rightExpression.accept(*this);
- utils().convertType(*rightExpression.annotation().type, commonType, cleanupNeeded);
+ utils().convertType(*rightExpression.annotation().type, *rightTargetType, cleanupNeeded);
}
else
{
rightExpression.accept(*this);
- utils().convertType(*rightExpression.annotation().type, commonType, cleanupNeeded);
+ utils().convertType(*rightExpression.annotation().type, *rightTargetType, cleanupNeeded);
leftExpression.accept(*this);
- utils().convertType(*leftExpression.annotation().type, commonType, cleanupNeeded);
+ utils().convertType(*leftExpression.annotation().type, *leftTargetType, cleanupNeeded);
}
- if (Token::isCompareOp(c_op))
- appendCompareOperatorCode(c_op, commonType);
+ if (Token::isShiftOp(c_op))
+ // shift only cares about the signedness of both sides
+ appendShiftOperatorCode(c_op, *leftTargetType, *rightTargetType);
+ else if (Token::isCompareOp(c_op))
+ appendCompareOperatorCode(c_op, *commonType);
else
- appendOrdinaryBinaryOperatorCode(c_op, commonType);
+ appendOrdinaryBinaryOperatorCode(c_op, *commonType);
}
// do not visit the child nodes, we already did that explicitly
@@ -399,8 +436,7 @@ bool ExpressionCompiler::visit(BinaryOperation const& _binaryOperation)
bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
{
CompilerContext::LocationSetter locationSetter(m_context, _functionCall);
- using Location = FunctionType::Location;
- if (_functionCall.annotation().isTypeConversion)
+ if (_functionCall.annotation().kind == FunctionCallKind::TypeConversion)
{
solAssert(_functionCall.arguments().size() == 1, "");
solAssert(_functionCall.names().empty(), "");
@@ -411,7 +447,7 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
}
FunctionTypePointer functionType;
- if (_functionCall.annotation().isStructConstructorCall)
+ if (_functionCall.annotation().kind == FunctionCallKind::StructConstructorCall)
{
auto const& type = dynamic_cast<TypeType const&>(*_functionCall.expression().annotation().type);
auto const& structType = dynamic_cast<StructType const&>(*type.actualType());
@@ -442,7 +478,7 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
solAssert(found, "");
}
- if (_functionCall.annotation().isStructConstructorCall)
+ if (_functionCall.annotation().kind == FunctionCallKind::StructConstructorCall)
{
TypeType const& type = dynamic_cast<TypeType const&>(*_functionCall.expression().annotation().type);
auto const& structType = dynamic_cast<StructType const&>(*type.actualType());
@@ -464,10 +500,10 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
FunctionType const& function = *functionType;
if (function.bound())
// Only delegatecall and internal functions can be bound; this might be lifted later.
- solAssert(function.location() == Location::DelegateCall || function.location() == Location::Internal, "");
- switch (function.location())
+ solAssert(function.kind() == FunctionType::Kind::DelegateCall || function.kind() == FunctionType::Kind::Internal, "");
+ switch (function.kind())
{
- case Location::Internal:
+ case FunctionType::Kind::Internal:
{
// Calling convention: Caller pushes return address and arguments
// Callee removes them and pushes return values
@@ -490,7 +526,7 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
if (m_context.runtimeContext())
// We have a runtime context, so we need the creation part.
- m_context << (u256(1) << 32) << Instruction::SWAP1 << Instruction::DIV;
+ utils().rightShiftNumberOnStack(32, false);
else
// Extract the runtime part.
m_context << ((u256(1) << 32) - 1) << Instruction::AND;
@@ -503,16 +539,16 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
m_context.adjustStackOffset(returnParametersSize - parameterSize - 1);
break;
}
- case Location::External:
- case Location::CallCode:
- case Location::DelegateCall:
- case Location::Bare:
- case Location::BareCallCode:
- case Location::BareDelegateCall:
+ case FunctionType::Kind::External:
+ case FunctionType::Kind::CallCode:
+ case FunctionType::Kind::DelegateCall:
+ case FunctionType::Kind::Bare:
+ case FunctionType::Kind::BareCallCode:
+ case FunctionType::Kind::BareDelegateCall:
_functionCall.expression().accept(*this);
appendExternalFunctionCall(function, arguments);
break;
- case Location::Creation:
+ case FunctionType::Kind::Creation:
{
_functionCall.expression().accept(*this);
solAssert(!function.gasSet(), "Gas limit set for contract creation.");
@@ -523,20 +559,24 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
arg->accept(*this);
argumentTypes.push_back(arg->annotation().type);
}
- ContractDefinition const& contract =
- dynamic_cast<ContractType const&>(*function.returnParameterTypes().front()).contractDefinition();
- // copy the contract's code into memory
- eth::Assembly const& assembly = m_context.compiledContract(contract);
- utils().fetchFreeMemoryPointer();
- // TODO we create a copy here, which is actually what we want.
- // This should be revisited at the point where we fix
- // https://github.com/ethereum/solidity/issues/1092
- // pushes size
- auto subroutine = m_context.addSubroutine(make_shared<eth::Assembly>(assembly));
- m_context << Instruction::DUP1 << subroutine;
- m_context << Instruction::DUP4 << Instruction::CODECOPY;
-
- m_context << Instruction::ADD;
+ ContractDefinition const* contract =
+ &dynamic_cast<ContractType const&>(*function.returnParameterTypes().front()).contractDefinition();
+ m_context.callLowLevelFunction(
+ "$copyContractCreationCodeToMemory_" + contract->type()->identifier(),
+ 0,
+ 1,
+ [contract](CompilerContext& _context)
+ {
+ // copy the contract's code into memory
+ eth::Assembly const& assembly = _context.compiledContract(*contract);
+ CompilerUtils(_context).fetchFreeMemoryPointer();
+ // pushes size
+ auto subroutine = _context.addSubroutine(make_shared<eth::Assembly>(assembly));
+ _context << Instruction::DUP1 << subroutine;
+ _context << Instruction::DUP4 << Instruction::CODECOPY;
+ _context << Instruction::ADD;
+ }
+ );
utils().encodeToMemory(argumentTypes, function.parameterTypes());
// now on stack: memory_end_ptr
// need: size, offset, endowment
@@ -548,12 +588,12 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
m_context << Instruction::CREATE;
// Check if zero (out of stack or not enough balance).
m_context << Instruction::DUP1 << Instruction::ISZERO;
- m_context.appendConditionalJumpTo(m_context.errorTag());
+ m_context.appendConditionalRevert();
if (function.valueSet())
m_context << swapInstruction(1) << Instruction::POP;
break;
}
- case Location::SetGas:
+ case FunctionType::Kind::SetGas:
{
// stack layout: contract_address function_id [gas] [value]
_functionCall.expression().accept(*this);
@@ -569,7 +609,7 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
m_context << Instruction::POP;
break;
}
- case Location::SetValue:
+ case FunctionType::Kind::SetValue:
// stack layout: contract_address function_id [gas] [value]
_functionCall.expression().accept(*this);
// Note that function is not the original function, but the ".value" function.
@@ -578,7 +618,8 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
m_context << Instruction::POP;
arguments.front()->accept(*this);
break;
- case Location::Send:
+ case FunctionType::Kind::Send:
+ case FunctionType::Kind::Transfer:
_functionCall.expression().accept(*this);
// Provide the gas stipend manually at first because we may send zero ether.
// Will be zeroed if we send more than zero ether.
@@ -597,7 +638,7 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
TypePointers{},
strings(),
strings(),
- Location::Bare,
+ FunctionType::Kind::Bare,
false,
nullptr,
false,
@@ -607,13 +648,22 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
),
{}
);
+ if (function.kind() == FunctionType::Kind::Transfer)
+ {
+ // Check if zero (out of stack or not enough balance).
+ m_context << Instruction::ISZERO;
+ m_context.appendConditionalRevert();
+ }
break;
- case Location::Selfdestruct:
+ case FunctionType::Kind::Selfdestruct:
arguments.front()->accept(*this);
utils().convertType(*arguments.front()->annotation().type, *function.parameterTypes().front(), true);
- m_context << Instruction::SUICIDE;
+ m_context << Instruction::SELFDESTRUCT;
+ break;
+ case FunctionType::Kind::Revert:
+ m_context.appendRevert();
break;
- case Location::SHA3:
+ case FunctionType::Kind::SHA3:
{
TypePointers argumentTypes;
for (auto const& arg: arguments)
@@ -624,16 +674,16 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
utils().fetchFreeMemoryPointer();
utils().encodeToMemory(argumentTypes, TypePointers(), function.padArguments(), true);
utils().toSizeAfterFreeMemoryPointer();
- m_context << Instruction::SHA3;
+ m_context << Instruction::KECCAK256;
break;
}
- case Location::Log0:
- case Location::Log1:
- case Location::Log2:
- case Location::Log3:
- case Location::Log4:
+ case FunctionType::Kind::Log0:
+ case FunctionType::Kind::Log1:
+ case FunctionType::Kind::Log2:
+ case FunctionType::Kind::Log3:
+ case FunctionType::Kind::Log4:
{
- unsigned logNumber = int(function.location()) - int(Location::Log0);
+ unsigned logNumber = int(function.kind()) - int(FunctionType::Kind::Log0);
for (unsigned arg = logNumber; arg > 0; --arg)
{
arguments[arg]->accept(*this);
@@ -650,7 +700,7 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
m_context << logInstruction(logNumber);
break;
}
- case Location::Event:
+ case FunctionType::Kind::Event:
{
_functionCall.expression().accept(*this);
auto const& event = dynamic_cast<EventDefinition const&>(function.declaration());
@@ -671,7 +721,7 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
true
);
utils().toSizeAfterFreeMemoryPointer();
- m_context << Instruction::SHA3;
+ m_context << Instruction::KECCAK256;
}
else
utils().convertType(
@@ -704,50 +754,50 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
m_context << logInstruction(numIndexed);
break;
}
- case Location::BlockHash:
+ case FunctionType::Kind::BlockHash:
{
arguments[0]->accept(*this);
utils().convertType(*arguments[0]->annotation().type, *function.parameterTypes()[0], true);
m_context << Instruction::BLOCKHASH;
break;
}
- case Location::AddMod:
- case Location::MulMod:
+ case FunctionType::Kind::AddMod:
+ case FunctionType::Kind::MulMod:
{
for (unsigned i = 0; i < 3; i ++)
{
arguments[2 - i]->accept(*this);
utils().convertType(*arguments[2 - i]->annotation().type, IntegerType(256));
}
- if (function.location() == Location::AddMod)
+ if (function.kind() == FunctionType::Kind::AddMod)
m_context << Instruction::ADDMOD;
else
m_context << Instruction::MULMOD;
break;
}
- case Location::ECRecover:
- case Location::SHA256:
- case Location::RIPEMD160:
+ case FunctionType::Kind::ECRecover:
+ case FunctionType::Kind::SHA256:
+ case FunctionType::Kind::RIPEMD160:
{
_functionCall.expression().accept(*this);
- static const map<Location, u256> contractAddresses{{Location::ECRecover, 1},
- {Location::SHA256, 2},
- {Location::RIPEMD160, 3}};
- m_context << contractAddresses.find(function.location())->second;
+ static const map<FunctionType::Kind, u256> contractAddresses{{FunctionType::Kind::ECRecover, 1},
+ {FunctionType::Kind::SHA256, 2},
+ {FunctionType::Kind::RIPEMD160, 3}};
+ m_context << contractAddresses.find(function.kind())->second;
for (unsigned i = function.sizeOnStack(); i > 0; --i)
m_context << swapInstruction(i);
appendExternalFunctionCall(function, arguments);
break;
}
- case Location::ByteArrayPush:
- case Location::ArrayPush:
+ case FunctionType::Kind::ByteArrayPush:
+ case FunctionType::Kind::ArrayPush:
{
_functionCall.expression().accept(*this);
solAssert(function.parameterTypes().size() == 1, "");
solAssert(!!function.parameterTypes()[0], "");
TypePointer paramType = function.parameterTypes()[0];
shared_ptr<ArrayType> arrayType =
- function.location() == Location::ArrayPush ?
+ function.kind() == FunctionType::Kind::ArrayPush ?
make_shared<ArrayType>(DataLocation::Storage, paramType) :
make_shared<ArrayType>(DataLocation::Storage);
// get the current length
@@ -766,17 +816,18 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
arguments[0]->accept(*this);
// stack: newLength storageSlot slotOffset argValue
TypePointer type = arguments[0]->annotation().type->closestTemporaryType(arrayType->baseType());
+ solAssert(type, "");
utils().convertType(*arguments[0]->annotation().type, *type);
utils().moveToStackTop(1 + type->sizeOnStack());
utils().moveToStackTop(1 + type->sizeOnStack());
// stack: newLength argValue storageSlot slotOffset
- if (function.location() == Location::ArrayPush)
+ if (function.kind() == FunctionType::Kind::ArrayPush)
StorageItem(m_context, *paramType).storeValue(*type, _functionCall.location(), true);
else
StorageByteArrayElement(m_context).storeValue(*type, _functionCall.location(), true);
break;
}
- case Location::ObjectCreation:
+ case FunctionType::Kind::ObjectCreation:
{
// Will allocate at the end of memory (MSIZE) and not write at all unless the base
// type is dynamically sized.
@@ -826,6 +877,23 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
m_context << Instruction::POP;
break;
}
+ case FunctionType::Kind::Assert:
+ case FunctionType::Kind::Require:
+ {
+ arguments.front()->accept(*this);
+ utils().convertType(*arguments.front()->annotation().type, *function.parameterTypes().front(), false);
+ // jump if condition was met
+ m_context << Instruction::ISZERO << Instruction::ISZERO;
+ auto success = m_context.appendConditionalJump();
+ if (function.kind() == FunctionType::Kind::Assert)
+ // condition was not met, flag an error
+ m_context.appendInvalid();
+ else
+ m_context.appendRevert();
+ // the success branch
+ m_context << success;
+ break;
+ }
default:
BOOST_THROW_EXCEPTION(InternalCompilerError() << errinfo_comment("Invalid function type."));
}
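
The new Assert and Require cases above differ only in how the failing branch is flagged: require-style failures use REVERT (which rolls back and refunds remaining gas), assert-style failures use the INVALID opcode (which consumes all remaining gas). As a loose host-side analogy, illustrative only and not the emitted code:

#include <cstdlib>
#include <stdexcept>

// Loose analogy: throwing ~ REVERT (recoverable failure), aborting ~ INVALID (hard failure).
void requireLike(bool _condition)
{
	if (!_condition)
		throw std::runtime_error("revert");
}

void assertLike(bool _condition)
{
	if (!_condition)
		std::abort();
}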
@@ -853,7 +921,7 @@ bool ExpressionCompiler::visit(MemberAccess const& _memberAccess)
*funType->selfType(),
true
);
- if (funType->location() == FunctionType::Location::Internal)
+ if (funType->kind() == FunctionType::Kind::Internal)
{
FunctionDefinition const& funDef = dynamic_cast<decltype(funDef)>(funType->declaration());
utils().pushCombinedFunctionEntryLabel(funDef);
@@ -861,10 +929,10 @@ bool ExpressionCompiler::visit(MemberAccess const& _memberAccess)
}
else
{
- solAssert(funType->location() == FunctionType::Location::DelegateCall, "");
+ solAssert(funType->kind() == FunctionType::Kind::DelegateCall, "");
auto contract = dynamic_cast<ContractDefinition const*>(funType->declaration().scope());
solAssert(contract && contract->isLibrary(), "");
- m_context.appendLibraryAddress(contract->name());
+ m_context.appendLibraryAddress(contract->fullyQualifiedName());
m_context << funType->externalIdentifier();
utils().moveIntoStack(funType->selfType()->sizeOnStack(), 2);
}
@@ -880,19 +948,44 @@ bool ExpressionCompiler::visit(MemberAccess const& _memberAccess)
solAssert(_memberAccess.annotation().type, "_memberAccess has no type");
if (auto funType = dynamic_cast<FunctionType const*>(_memberAccess.annotation().type.get()))
{
- if (funType->location() != FunctionType::Location::Internal)
- {
- _memberAccess.expression().accept(*this);
- m_context << funType->externalIdentifier();
- }
- else
+ switch (funType->kind())
{
+ case FunctionType::Kind::Internal:
// We do not visit the expression here on purpose, because in the case of an
// internal library function call, this would push the library address forcing
// us to link against it although we actually do not need it.
- auto const* function = dynamic_cast<FunctionDefinition const*>(_memberAccess.annotation().referencedDeclaration);
- solAssert(!!function, "Function not found in member access");
- utils().pushCombinedFunctionEntryLabel(*function);
+ if (auto const* function = dynamic_cast<FunctionDefinition const*>(_memberAccess.annotation().referencedDeclaration))
+ utils().pushCombinedFunctionEntryLabel(*function);
+ else
+ solAssert(false, "Function not found in member access");
+ break;
+ case FunctionType::Kind::Event:
+ if (!dynamic_cast<EventDefinition const*>(_memberAccess.annotation().referencedDeclaration))
+ solAssert(false, "event not found");
+ // no-op, because the parent node will do the job
+ break;
+ case FunctionType::Kind::External:
+ case FunctionType::Kind::Creation:
+ case FunctionType::Kind::DelegateCall:
+ case FunctionType::Kind::CallCode:
+ case FunctionType::Kind::Send:
+ case FunctionType::Kind::Bare:
+ case FunctionType::Kind::BareCallCode:
+ case FunctionType::Kind::BareDelegateCall:
+ case FunctionType::Kind::Transfer:
+ _memberAccess.expression().accept(*this);
+ m_context << funType->externalIdentifier();
+ break;
+ case FunctionType::Kind::Log0:
+ case FunctionType::Kind::Log1:
+ case FunctionType::Kind::Log2:
+ case FunctionType::Kind::Log3:
+ case FunctionType::Kind::Log4:
+ case FunctionType::Kind::ECRecover:
+ case FunctionType::Kind::SHA256:
+ case FunctionType::Kind::RIPEMD160:
+ default:
+ solAssert(false, "unsupported member function");
}
}
else if (dynamic_cast<TypeType const*>(_memberAccess.annotation().type.get()))
@@ -961,7 +1054,7 @@ bool ExpressionCompiler::visit(MemberAccess const& _memberAccess)
);
m_context << Instruction::BALANCE;
}
- else if ((set<string>{"send", "call", "callcode", "delegatecall"}).count(member))
+ else if ((set<string>{"send", "transfer", "call", "callcode", "delegatecall"}).count(member))
utils().convertType(
*_memberAccess.expression().annotation().type,
IntegerType(0, IntegerType::Modifier::Address),
@@ -1121,7 +1214,7 @@ bool ExpressionCompiler::visit(IndexAccess const& _indexAccess)
utils().storeInMemoryDynamic(IntegerType(256));
m_context << u256(0);
}
- m_context << Instruction::SHA3;
+ m_context << Instruction::KECCAK256;
m_context << u256(0);
setLValueToStorageItem(_indexAccess);
}
@@ -1173,10 +1266,10 @@ bool ExpressionCompiler::visit(IndexAccess const& _indexAccess)
m_context << u256(fixedBytesType.numBytes());
m_context << Instruction::DUP2 << Instruction::LT << Instruction::ISZERO;
// out-of-bounds access throws exception
- m_context.appendConditionalJumpTo(m_context.errorTag());
+ m_context.appendConditionalInvalid();
m_context << Instruction::BYTE;
- m_context << (u256(1) << (256 - 8)) << Instruction::MUL;
+ utils().leftShiftNumberOnStack(256 - 8);
}
else if (baseType.category() == Type::Category::TypeType)
{
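For index access into a fixed-bytes value, the hunk above checks the index against the byte count (an out-of-range index now runs into an invalid opcode instead of jumping to the error tag), extracts the byte with BYTE, and left-shifts it by 248 bits so the result is again left-aligned like a bytes1. A minimal sketch of that arithmetic, using 64-bit words in place of the EVM's 256-bit words purely to keep the example short:

#include <cassert>
#include <cstdint>

// Model of bytesN[i] on a 64-bit word: bounds check, byte extraction, left-alignment.
uint64_t indexFixedBytes(uint64_t value, unsigned index, unsigned numBytes)
{
    assert(numBytes <= 8 && index < numBytes);           // generated code: LT/ISZERO + conditional invalid
    uint64_t byte = (value >> (8 * (7 - index))) & 0xFF; // BYTE counts from the most significant end
    return byte << (8 * 7);                              // left shift (248 bits on the EVM) re-aligns the result
}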
@@ -1218,7 +1311,7 @@ void ExpressionCompiler::endVisit(Identifier const& _identifier)
else if (auto contract = dynamic_cast<ContractDefinition const*>(declaration))
{
if (contract->isLibrary())
- m_context.appendLibraryAddress(contract->name());
+ m_context.appendLibraryAddress(contract->fullyQualifiedName());
}
else if (dynamic_cast<EventDefinition const*>(declaration))
{
@@ -1247,6 +1340,7 @@ void ExpressionCompiler::endVisit(Literal const& _literal)
{
case Type::Category::RationalNumber:
case Type::Category::Bool:
+ case Type::Category::Integer:
m_context << type->literalValue(&_literal);
break;
case Type::Category::StringLiteral:
@@ -1273,11 +1367,12 @@ void ExpressionCompiler::appendAndOrOperatorCode(BinaryOperation const& _binaryO
void ExpressionCompiler::appendCompareOperatorCode(Token::Value _operator, Type const& _type)
{
+ solAssert(_type.sizeOnStack() == 1, "Comparison of multi-slot types.");
if (_operator == Token::Equal || _operator == Token::NotEqual)
{
if (FunctionType const* funType = dynamic_cast<decltype(funType)>(&_type))
{
- if (funType->location() == FunctionType::Location::Internal)
+ if (funType->kind() == FunctionType::Kind::Internal)
{
// We have to remove the upper bits (construction time value) because they might
// be "unknown" in one of the operands and not in the other.
@@ -1326,8 +1421,6 @@ void ExpressionCompiler::appendOrdinaryBinaryOperatorCode(Token::Value _operator
appendArithmeticOperatorCode(_operator, _type);
else if (Token::isBitOp(_operator))
appendBitOperatorCode(_operator);
- else if (Token::isShiftOp(_operator))
- appendShiftOperatorCode(_operator);
else
BOOST_THROW_EXCEPTION(InternalCompilerError() << errinfo_comment("Unknown binary operator."));
}
@@ -1356,7 +1449,7 @@ void ExpressionCompiler::appendArithmeticOperatorCode(Token::Value _operator, Ty
{
// Test for division by zero
m_context << Instruction::DUP2 << Instruction::ISZERO;
- m_context.appendConditionalJumpTo(m_context.errorTag());
+ m_context.appendConditionalInvalid();
if (_operator == Token::Div)
m_context << (c_isSigned ? Instruction::SDIV : Instruction::DIV);
@@ -1390,17 +1483,45 @@ void ExpressionCompiler::appendBitOperatorCode(Token::Value _operator)
}
}
-void ExpressionCompiler::appendShiftOperatorCode(Token::Value _operator)
+void ExpressionCompiler::appendShiftOperatorCode(Token::Value _operator, Type const& _valueType, Type const& _shiftAmountType)
{
- solUnimplemented("Shift operators not yet implemented.");
+ // stack: shift_amount value_to_shift
+
+ bool c_valueSigned = false;
+ if (auto valueType = dynamic_cast<IntegerType const*>(&_valueType))
+ c_valueSigned = valueType->isSigned();
+ else
+ solAssert(dynamic_cast<FixedBytesType const*>(&_valueType), "Only integer and fixed bytes type supported for shifts.");
+
+ // The amount can be a RationalNumberType too.
+ bool c_amountSigned = false;
+ if (auto amountType = dynamic_cast<RationalNumberType const*>(&_shiftAmountType))
+ {
+ // This should be handled by the type checker.
+ solAssert(amountType->integerType(), "");
+ solAssert(!amountType->integerType()->isSigned(), "");
+ }
+ else if (auto amountType = dynamic_cast<IntegerType const*>(&_shiftAmountType))
+ c_amountSigned = amountType->isSigned();
+ else
+ solAssert(false, "Invalid shift amount type.");
+
+ // shift by negative amount throws exception
+ if (c_amountSigned)
+ {
+ m_context << u256(0) << Instruction::DUP3 << Instruction::SLT;
+ m_context.appendConditionalInvalid();
+ }
+
switch (_operator)
{
case Token::SHL:
+ m_context << Instruction::SWAP1 << u256(2) << Instruction::EXP << Instruction::MUL;
break;
case Token::SAR:
+ m_context << Instruction::SWAP1 << u256(2) << Instruction::EXP << Instruction::SWAP1 << (c_valueSigned ? Instruction::SDIV : Instruction::DIV);
break;
case Token::SHR:
- break;
default:
BOOST_THROW_EXCEPTION(InternalCompilerError() << errinfo_comment("Unknown shift operator."));
}
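appendShiftOperatorCode targets an EVM without native shift instructions, so the shifts are expressed arithmetically: SHL as a multiplication by 2^amount (wrapping like all EVM arithmetic) and SAR as a division by 2^amount, signed or unsigned depending on the value type. A small sketch of that arithmetic, with 64-bit words standing in for 256-bit EVM words and the shift amount assumed small enough that the power of two fits:

#include <cassert>
#include <cstdint>

// x << n modelled as x * 2^n (the generated sequence is: 2 EXP amount; MUL).
uint64_t shlViaExpMul(uint64_t value, unsigned amount)
{
    assert(amount < 64);
    return value * (uint64_t(1) << amount);
}

// Arithmetic x >> n modelled as x / 2^n with signed division (2 EXP amount; SDIV).
// SDIV rounds towards zero, so for negative values this is not bit-identical
// to a two's-complement arithmetic shift.
int64_t sarViaExpSdiv(int64_t value, unsigned amount)
{
    assert(amount < 63);
    return value / (int64_t(1) << amount);
}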
@@ -1434,11 +1555,10 @@ void ExpressionCompiler::appendExternalFunctionCall(
if (_functionType.bound())
utils().moveToStackTop(gasValueSize, _functionType.selfType()->sizeOnStack());
- using FunctionKind = FunctionType::Location;
- FunctionKind funKind = _functionType.location();
- bool returnSuccessCondition = funKind == FunctionKind::Bare || funKind == FunctionKind::BareCallCode;
- bool isCallCode = funKind == FunctionKind::BareCallCode || funKind == FunctionKind::CallCode;
- bool isDelegateCall = funKind == FunctionKind::BareDelegateCall || funKind == FunctionKind::DelegateCall;
+ auto funKind = _functionType.kind();
+ bool returnSuccessCondition = funKind == FunctionType::Kind::Bare || funKind == FunctionType::Kind::BareCallCode;
+ bool isCallCode = funKind == FunctionType::Kind::BareCallCode || funKind == FunctionType::Kind::CallCode;
+ bool isDelegateCall = funKind == FunctionType::Kind::BareDelegateCall || funKind == FunctionType::Kind::DelegateCall;
unsigned retSize = 0;
if (returnSuccessCondition)
@@ -1455,7 +1575,7 @@ void ExpressionCompiler::appendExternalFunctionCall(
TypePointers parameterTypes = _functionType.parameterTypes();
bool manualFunctionId = false;
if (
- (funKind == FunctionKind::Bare || funKind == FunctionKind::BareCallCode || funKind == FunctionKind::BareDelegateCall) &&
+ (funKind == FunctionType::Kind::Bare || funKind == FunctionType::Kind::BareCallCode || funKind == FunctionType::Kind::BareDelegateCall) &&
!_arguments.empty()
)
{
@@ -1490,7 +1610,7 @@ void ExpressionCompiler::appendExternalFunctionCall(
argumentTypes.push_back(_arguments[i]->annotation().type);
}
- if (funKind == FunctionKind::ECRecover)
+ if (funKind == FunctionType::Kind::ECRecover)
{
// Clears 32 bytes of currently free memory and advances free memory pointer.
// Output area will be "start of input area" - 32.
@@ -1546,7 +1666,7 @@ void ExpressionCompiler::appendExternalFunctionCall(
// put on stack: <size of output> <memory pos of output> <size of input> <memory pos of input>
m_context << u256(retSize);
utils().fetchFreeMemoryPointer(); // This is the start of input
- if (funKind == FunctionKind::ECRecover)
+ if (funKind == FunctionType::Kind::ECRecover)
{
// In this case, output is 32 bytes before input and has already been cleared.
m_context << u256(32) << Instruction::DUP2 << Instruction::SUB << Instruction::SWAP1;
@@ -1572,10 +1692,10 @@ void ExpressionCompiler::appendExternalFunctionCall(
bool existenceChecked = false;
// Check that the target contract exists (has code) for non-low-level calls.
- if (funKind == FunctionKind::External || funKind == FunctionKind::CallCode || funKind == FunctionKind::DelegateCall)
+ if (funKind == FunctionType::Kind::External || funKind == FunctionType::Kind::CallCode || funKind == FunctionType::Kind::DelegateCall)
{
m_context << Instruction::DUP1 << Instruction::EXTCODESIZE << Instruction::ISZERO;
- m_context.appendConditionalJumpTo(m_context.errorTag());
+ m_context.appendConditionalRevert();
existenceChecked = true;
}
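For the high-level call kinds, the hunk above now reverts (rather than jumping to the error tag) when the target address has no code, because a bare CALL to such an address reports success even though no code ran. A hedged sketch of the guard; the Account type and rawCall helper are invented for illustration only:

#include <string>

struct Account { std::string code; };           // hypothetical stand-in for an on-chain account

bool rawCall(Account const&) { return true; }   // a codeless target still "succeeds" at the CALL level

bool highLevelCall(Account const& target)
{
    if (target.code.empty())                    // ~ EXTCODESIZE; ISZERO
        return false;                           // ~ appendConditionalRevert()
    return rawCall(target);
}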
@@ -1611,7 +1731,7 @@ void ExpressionCompiler::appendExternalFunctionCall(
{
//Propagate error condition (if CALL pushes 0 on stack).
m_context << Instruction::ISZERO;
- m_context.appendConditionalJumpTo(m_context.errorTag());
+ m_context.appendConditionalRevert();
}
utils().popStackSlots(remainsSize);
@@ -1620,14 +1740,14 @@ void ExpressionCompiler::appendExternalFunctionCall(
{
// already there
}
- else if (funKind == FunctionKind::RIPEMD160)
+ else if (funKind == FunctionType::Kind::RIPEMD160)
{
// fix: built-in contract returns right-aligned data
utils().fetchFreeMemoryPointer();
utils().loadFromMemoryDynamic(IntegerType(160), false, true, false);
utils().convertType(IntegerType(160), FixedBytesType(20));
}
- else if (funKind == FunctionKind::ECRecover)
+ else if (funKind == FunctionType::Kind::ECRecover)
{
// Output is 32 bytes before input / free mem pointer.
// Failing ecrecover cannot be detected, so we clear output before the call.
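The hunk above only swaps Location for Kind, but the surrounding fix-up deserves a note: the RIPEMD160 precompile returns its 20-byte digest right-aligned in a 32-byte word, while a bytes20 value is left-aligned, hence the load as a 160-bit integer followed by the conversion to FixedBytesType(20). A simplified model of that realignment (64-bit words instead of 256-bit, an assumption made for brevity):

#include <cstdint>

// Left-align a right-aligned digest so it behaves like a fixed-bytes value.
uint64_t toFixedBytes(uint64_t rightAlignedDigest, unsigned digestBytes)
{
    unsigned unusedBytes = 8 - digestBytes;         // 32 - 20 on the real EVM
    return rightAlignedDigest << (8 * unusedBytes); // low bytes become zero padding
}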
@@ -1688,6 +1808,16 @@ void ExpressionCompiler::setLValueToStorageItem(Expression const& _expression)
setLValue<StorageItem>(_expression, *_expression.annotation().type);
}
+bool ExpressionCompiler::cleanupNeededForOp(Type::Category _type, Token::Value _op)
+{
+ if (Token::isCompareOp(_op) || Token::isShiftOp(_op))
+ return true;
+ else if (_type == Type::Category::Integer && (_op == Token::Div || _op == Token::Mod))
+ return true;
+ else
+ return false;
+}
+
CompilerUtils ExpressionCompiler::utils()
{
return CompilerUtils(m_context);
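The new cleanupNeededForOp helper records which operators need the operands' unused higher-order bits cleaned first. Truncated results of addition or multiplication cannot be affected by dirty upper bits, but comparisons, shifts, division and modulo read the whole 256-bit word and would see them. A stand-alone sketch of the failure mode, with 32-bit words standing in for 256-bit stack slots and all names invented for illustration:

#include <cstdint>
#include <iostream>

int main()
{
    uint32_t slotA = 0xFFFFFF05u; // an 8-bit value 5 carried with dirty upper bits
    uint32_t slotB = 0x00000005u; // the same logical value, clean

    // A whole-word comparison sees the dirt and gives the wrong answer.
    std::cout << (slotA == slotB) << '\n';                     // prints 0

    // After cleanup (masking to the declared width) the result is correct.
    std::cout << ((slotA & 0xFFu) == (slotB & 0xFFu)) << '\n'; // prints 1
}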
diff --git a/libsolidity/codegen/ExpressionCompiler.h b/libsolidity/codegen/ExpressionCompiler.h
index f08bded9..3b8cf1c6 100644
--- a/libsolidity/codegen/ExpressionCompiler.h
+++ b/libsolidity/codegen/ExpressionCompiler.h
@@ -28,7 +28,7 @@
#include <libevmasm/SourceLocation.h>
#include <libsolidity/ast/ASTVisitor.h>
#include <libsolidity/codegen/LValue.h>
-#include <libsolidity/interface/Utils.h>
+#include <libsolidity/interface/Exceptions.h>
namespace dev {
namespace eth
@@ -91,7 +91,7 @@ private:
void appendArithmeticOperatorCode(Token::Value _operator, Type const& _type);
void appendBitOperatorCode(Token::Value _operator);
- void appendShiftOperatorCode(Token::Value _operator);
+ void appendShiftOperatorCode(Token::Value _operator, Type const& _valueType, Type const& _shiftAmountType);
/// @}
/// Appends code to call a function of the given type with the given arguments.
@@ -117,6 +117,10 @@ private:
template <class _LValueType, class... _Arguments>
void setLValue(Expression const& _expression, _Arguments const&... _arguments);
+ /// @returns true if the operator applied to the given type requires a cleanup prior to the
+ /// operation.
+ bool cleanupNeededForOp(Type::Category _type, Token::Value _op);
+
/// @returns the CompilerUtils object containing the current context.
CompilerUtils utils();
diff --git a/libsolidity/codegen/LValue.cpp b/libsolidity/codegen/LValue.cpp
index 23fe2d4e..e19cf41e 100644
--- a/libsolidity/codegen/LValue.cpp
+++ b/libsolidity/codegen/LValue.cpp
@@ -186,7 +186,7 @@ void StorageItem::retrieveValue(SourceLocation const&, bool _remove) const
solUnimplemented("Not yet implemented - FixedPointType.");
if (m_dataType->category() == Type::Category::FixedBytes)
{
- m_context << (u256(0x1) << (256 - 8 * m_dataType->storageBytes())) << Instruction::MUL;
+ CompilerUtils(m_context).leftShiftNumberOnStack(256 - 8 * m_dataType->storageBytes());
cleaned = true;
}
else if (
@@ -199,7 +199,7 @@ void StorageItem::retrieveValue(SourceLocation const&, bool _remove) const
}
else if (FunctionType const* fun = dynamic_cast<decltype(fun)>(m_dataType))
{
- if (fun->location() == FunctionType::Location::External)
+ if (fun->kind() == FunctionType::Kind::External)
{
CompilerUtils(m_context).splitExternalFunctionType(false);
cleaned = true;
@@ -223,7 +223,6 @@ void StorageItem::storeValue(Type const& _sourceType, SourceLocation const& _loc
{
solAssert(m_dataType->storageBytes() <= 32, "Invalid storage bytes size.");
solAssert(m_dataType->storageBytes() > 0, "Invalid storage bytes size.");
-
if (m_dataType->storageBytes() == 32)
{
solAssert(m_dataType->sizeOnStack() == 1, "Invalid stack size.");
@@ -257,7 +256,7 @@ void StorageItem::storeValue(Type const& _sourceType, SourceLocation const& _loc
if (FunctionType const* fun = dynamic_cast<decltype(fun)>(m_dataType))
{
solAssert(_sourceType == *m_dataType, "function item stored but target is not equal to source");
- if (fun->location() == FunctionType::Location::External)
+ if (fun->kind() == FunctionType::Kind::External)
// Combine the two-item function type into a single stack slot.
utils.combineExternalFunctionType(false);
else
@@ -268,9 +267,7 @@ void StorageItem::storeValue(Type const& _sourceType, SourceLocation const& _loc
else if (m_dataType->category() == Type::Category::FixedBytes)
{
solAssert(_sourceType.category() == Type::Category::FixedBytes, "source not fixed bytes");
- m_context
- << (u256(0x1) << (256 - 8 * dynamic_cast<FixedBytesType const&>(*m_dataType).numBytes()))
- << Instruction::SWAP1 << Instruction::DIV;
+ CompilerUtils(m_context).rightShiftNumberOnStack(256 - 8 * dynamic_cast<FixedBytesType const&>(*m_dataType).numBytes(), false);
}
else
{