author     chriseth <chris@ethereum.org>             2018-04-17 05:03:49 +0800
committer  GitHub <noreply@github.com>               2018-04-17 05:03:49 +0800
commit     4cb486ee993cadde5564fb6c611d2bcf4fc44414 (patch)
tree       6b971913021037cf28141c38af97c7ecda77719f /libsolidity/codegen
parent     dfe3193c7382c80f1814247a162663a97c3f5e67 (diff)
parent     b8431c5c4a6795db8c42a1a3389047658bb87301 (diff)
Merge pull request #3892 from ethereum/develop
Merge develop into release for 0.4.22
Diffstat (limited to 'libsolidity/codegen')
-rw-r--r--   libsolidity/codegen/ABIFunctions.cpp          3
-rw-r--r--   libsolidity/codegen/ArrayUtils.cpp           53
-rw-r--r--   libsolidity/codegen/ArrayUtils.h              6
-rw-r--r--   libsolidity/codegen/CompilerContext.cpp      32
-rw-r--r--   libsolidity/codegen/CompilerContext.h         9
-rw-r--r--   libsolidity/codegen/CompilerUtils.cpp       284
-rw-r--r--   libsolidity/codegen/CompilerUtils.h          28
-rw-r--r--   libsolidity/codegen/ContractCompiler.cpp    158
-rw-r--r--   libsolidity/codegen/ContractCompiler.h        6
-rw-r--r--   libsolidity/codegen/ExpressionCompiler.cpp  275
10 files changed, 616 insertions, 238 deletions
diff --git a/libsolidity/codegen/ABIFunctions.cpp b/libsolidity/codegen/ABIFunctions.cpp
index 00f59065..8e890854 100644
--- a/libsolidity/codegen/ABIFunctions.cpp
+++ b/libsolidity/codegen/ABIFunctions.cpp
@@ -253,6 +253,9 @@ string ABIFunctions::cleanupFunction(Type const& _type, bool _revertOnFailure)
templ("body", w.render());
break;
}
+ case Type::Category::InaccessibleDynamic:
+ templ("body", "cleaned := 0");
+ break;
default:
solAssert(false, "Cleanup of type " + _type.identifier() + " requested.");
}
diff --git a/libsolidity/codegen/ArrayUtils.cpp b/libsolidity/codegen/ArrayUtils.cpp
index ce8cbb5f..0fe66d2d 100644
--- a/libsolidity/codegen/ArrayUtils.cpp
+++ b/libsolidity/codegen/ArrayUtils.cpp
@@ -741,10 +741,10 @@ void ArrayUtils::resizeDynamicArray(ArrayType const& _typeIn) const
if (_type.isByteArray())
// For a "long" byte array, store length as 2*length+1
_context << Instruction::DUP1 << Instruction::ADD << u256(1) << Instruction::ADD;
- _context<< Instruction::DUP4 << Instruction::SSTORE;
+ _context << Instruction::DUP4 << Instruction::SSTORE;
// skip if size is not reduced
_context << Instruction::DUP2 << Instruction::DUP2
- << Instruction::ISZERO << Instruction::GT;
+ << Instruction::GT << Instruction::ISZERO;
_context.appendConditionalJumpTo(resizeEnd);
// size reduced, clear the end of the array
@@ -774,6 +774,55 @@ void ArrayUtils::resizeDynamicArray(ArrayType const& _typeIn) const
);
}
+void ArrayUtils::incrementDynamicArraySize(ArrayType const& _type) const
+{
+ solAssert(_type.location() == DataLocation::Storage, "");
+ solAssert(_type.isDynamicallySized(), "");
+ if (!_type.isByteArray() && _type.baseType()->storageBytes() < 32)
+ solAssert(_type.baseType()->isValueType(), "Invalid storage size for non-value type.");
+
+ if (_type.isByteArray())
+ {
+ // We almost always just add 2 (length of byte arrays is shifted left by one)
+ // except for the case where we transition from a short byte array
+ // to a long byte array, there we have to copy.
+ // This happens if the length is exactly 31, which means that the
+ // lowest-order byte (we actually use a mask with fewer bits) must
+ // be (31*2+0) = 62
+
+ m_context.appendInlineAssembly(R"({
+ let data := sload(ref)
+ let shifted_length := and(data, 63)
+ // We have to copy if length is exactly 31, because that marks
+ // the transition between in-place and out-of-place storage.
+ switch shifted_length
+ case 62
+ {
+ mstore(0, ref)
+ let data_area := keccak256(0, 0x20)
+ sstore(data_area, and(data, not(0xff)))
+ // New length is 32, encoded as (32 * 2 + 1)
+ sstore(ref, 65)
+ // Replace ref variable by new length
+ ref := 32
+ }
+ default
+ {
+ sstore(ref, add(data, 2))
+ // Replace ref variable by new length
+ if iszero(and(data, 1)) { data := shifted_length }
+ ref := add(div(data, 2), 1)
+ }
+ })", {"ref"});
+ }
+ else
+ m_context.appendInlineAssembly(R"({
+ let new_length := add(sload(ref), 1)
+ sstore(ref, new_length)
+ ref := new_length
+ })", {"ref"});
+}
+
void ArrayUtils::clearStorageLoop(TypePointer const& _type) const
{
m_context.callLowLevelFunction(
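
Note (not part of the commit): the new ArrayUtils::incrementDynamicArraySize backs the specialised storage `.push()` code path used later in ExpressionCompiler. A minimal Solidity sketch of code that exercises both branches; the contract and member names are illustrative only:

    pragma solidity ^0.4.22;

    contract PushExample {
        bytes data;        // short byte arrays keep data and 2*length in one slot
        uint[] numbers;    // non-byte arrays store a plain length

        function grow() public {
            // For `numbers`, the generated code simply stores sload(slot) + 1.
            numbers.push(42);
            // For `data`, the stored value is 2*length while the array is short;
            // pushing the 32nd byte hits the "case 62" branch above, moves the
            // payload to keccak256(slot) and re-encodes the length as 2*length + 1.
            data.push(byte(0xff));
        }
    }
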
diff --git a/libsolidity/codegen/ArrayUtils.h b/libsolidity/codegen/ArrayUtils.h
index f3ddc4ee..99786397 100644
--- a/libsolidity/codegen/ArrayUtils.h
+++ b/libsolidity/codegen/ArrayUtils.h
@@ -67,6 +67,12 @@ public:
/// Stack pre: reference (excludes byte offset) new_length
/// Stack post:
void resizeDynamicArray(ArrayType const& _type) const;
+ /// Increments the size of a dynamic array by one.
+ /// Does not touch the new data element. In case of a byte array, this might move the
+ /// data.
+ /// Stack pre: reference (excludes byte offset)
+ /// Stack post: new_length
+ void incrementDynamicArraySize(ArrayType const& _type) const;
/// Appends a loop that clears a sequence of storage slots of the given type (excluding end).
/// Stack pre: end_ref start_ref
/// Stack post: end_ref
diff --git a/libsolidity/codegen/CompilerContext.cpp b/libsolidity/codegen/CompilerContext.cpp
index 0bf88267..a35eea73 100644
--- a/libsolidity/codegen/CompilerContext.cpp
+++ b/libsolidity/codegen/CompilerContext.cpp
@@ -193,14 +193,22 @@ Declaration const* CompilerContext::nextFunctionToCompile() const
return m_functionCompilationQueue.nextFunctionToCompile();
}
-ModifierDefinition const& CompilerContext::functionModifier(string const& _name) const
+ModifierDefinition const& CompilerContext::resolveVirtualFunctionModifier(
+ ModifierDefinition const& _modifier
+) const
{
+ // Libraries do not allow inheritance and their functions can be inlined, so we should not
+ // search the inheritance hierarchy (which will be the wrong one in case the function
+ // is inlined).
+ if (auto scope = dynamic_cast<ContractDefinition const*>(_modifier.scope()))
+ if (scope->isLibrary())
+ return _modifier;
solAssert(!m_inheritanceHierarchy.empty(), "No inheritance hierarchy set.");
for (ContractDefinition const* contract: m_inheritanceHierarchy)
for (ModifierDefinition const* modifier: contract->functionModifiers())
- if (modifier->name() == _name)
+ if (modifier->name() == _modifier.name())
return *modifier;
- solAssert(false, "Function modifier " + _name + " not found.");
+ solAssert(false, "Function modifier " + _modifier.name() + " not found in inheritance hierarchy.");
}
unsigned CompilerContext::baseStackOffsetOfVariable(Declaration const& _declaration) const
@@ -254,12 +262,20 @@ CompilerContext& CompilerContext::appendRevert()
return *this << u256(0) << u256(0) << Instruction::REVERT;
}
-CompilerContext& CompilerContext::appendConditionalRevert()
+CompilerContext& CompilerContext::appendConditionalRevert(bool _forwardReturnData)
{
- *this << Instruction::ISZERO;
- eth::AssemblyItem afterTag = appendConditionalJump();
- appendRevert();
- *this << afterTag;
+ if (_forwardReturnData && m_evmVersion.supportsReturndata())
+ appendInlineAssembly(R"({
+ if condition {
+ returndatacopy(0, 0, returndatasize())
+ revert(0, returndatasize())
+ }
+ })", {"condition"});
+ else
+ appendInlineAssembly(R"({
+ if condition { revert(0, 0) }
+ })", {"condition"});
+ *this << Instruction::POP;
return *this;
}
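
Note (not part of the commit): with _forwardReturnData set and an EVM that supports RETURNDATA, a failed sub-call now re-reverts with the callee's return data, so revert reasons bubble up through the call sites that use appendConditionalRevert(true) below (e.g. external calls, `transfer`, contract creation). A hedged Solidity sketch; contract names are made up:

    pragma solidity ^0.4.22;

    contract Callee {
        function fail() public pure {
            revert("callee failed");
        }
    }

    contract Caller {
        function callIt(Callee c) public {
            // If c.fail() reverts, the generated check executes
            //   returndatacopy(0, 0, returndatasize()); revert(0, returndatasize())
            // so the reason string propagates to the original caller.
            // On EVMs without RETURNDATA it falls back to revert(0, 0).
            c.fail();
        }
    }
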
diff --git a/libsolidity/codegen/CompilerContext.h b/libsolidity/codegen/CompilerContext.h
index cf626683..098472f7 100644
--- a/libsolidity/codegen/CompilerContext.h
+++ b/libsolidity/codegen/CompilerContext.h
@@ -130,7 +130,7 @@ public:
void appendMissingLowLevelFunctions();
ABIFunctions& abiFunctions() { return m_abiFunctions; }
- ModifierDefinition const& functionModifier(std::string const& _name) const;
+ ModifierDefinition const& resolveVirtualFunctionModifier(ModifierDefinition const& _modifier) const;
/// Returns the distance of the given local variable from the bottom of the stack (of the current function).
unsigned baseStackOffsetOfVariable(Declaration const& _declaration) const;
/// If supplied by a value returned by @ref baseStackOffsetOfVariable(variable), returns
@@ -156,8 +156,11 @@ public:
CompilerContext& appendConditionalInvalid();
/// Appends a REVERT(0, 0) call
CompilerContext& appendRevert();
- /// Appends a conditional REVERT(0, 0) call
- CompilerContext& appendConditionalRevert();
+ /// Appends a conditional REVERT-call, either forwarding the RETURNDATA or providing the
+ /// empty string. Consumes the condition.
+ /// If the current EVM version does not support RETURNDATA, uses REVERT but does not forward
+ /// the data.
+ CompilerContext& appendConditionalRevert(bool _forwardReturnData = false);
/// Appends a JUMP to a specific tag
CompilerContext& appendJumpTo(eth::AssemblyItem const& _tag) { m_asm->appendJump(_tag); return *this; }
/// Appends pushing of a new tag and @returns the new tag.
diff --git a/libsolidity/codegen/CompilerUtils.cpp b/libsolidity/codegen/CompilerUtils.cpp
index 533aca5c..4af7d905 100644
--- a/libsolidity/codegen/CompilerUtils.cpp
+++ b/libsolidity/codegen/CompilerUtils.cpp
@@ -21,12 +21,16 @@
*/
#include <libsolidity/codegen/CompilerUtils.h>
+
#include <libsolidity/ast/AST.h>
-#include <libevmasm/Instruction.h>
#include <libsolidity/codegen/ArrayUtils.h>
#include <libsolidity/codegen/LValue.h>
#include <libsolidity/codegen/ABIFunctions.h>
+#include <libevmasm/Instruction.h>
+
+#include <libdevcore/Whiskers.h>
+
using namespace std;
namespace dev
@@ -36,11 +40,17 @@ namespace solidity
const unsigned CompilerUtils::dataStartOffset = 4;
const size_t CompilerUtils::freeMemoryPointer = 64;
+const size_t CompilerUtils::zeroPointer = CompilerUtils::freeMemoryPointer + 32;
+const size_t CompilerUtils::generalPurposeMemoryStart = CompilerUtils::zeroPointer + 32;
const unsigned CompilerUtils::identityContractAddress = 4;
+static_assert(CompilerUtils::freeMemoryPointer >= 64, "Free memory pointer must not overlap with scratch area.");
+static_assert(CompilerUtils::zeroPointer >= CompilerUtils::freeMemoryPointer + 32, "Zero pointer must not overlap with free memory pointer.");
+static_assert(CompilerUtils::generalPurposeMemoryStart >= CompilerUtils::zeroPointer + 32, "General purpose memory must not overlap with zero area.");
+
void CompilerUtils::initialiseFreeMemoryPointer()
{
- m_context << u256(freeMemoryPointer + 32);
+ m_context << u256(generalPurposeMemoryStart);
storeFreeMemoryPointer();
}
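
Note (not part of the commit): with these constants the reserved memory layout is 0x00-0x3f scratch space, 0x40-0x5f the free-memory pointer, 0x60-0x7f the always-zero slot, and user memory starting at 0x80 (generalPurposeMemoryStart). A minimal Solidity/inline-assembly sketch that only reads the reserved slots:

    pragma solidity ^0.4.22;

    contract MemoryLayout {
        function probe() public returns (uint freePtr, uint zeroWord) {
            assembly {
                freePtr := mload(0x40)   // initialised to 0x80 by the code above
                zeroWord := mload(0x60)  // reserved zero slot; never written to
            }
        }
    }
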
@@ -68,6 +78,20 @@ void CompilerUtils::toSizeAfterFreeMemoryPointer()
m_context << Instruction::SWAP1;
}
+void CompilerUtils::revertWithStringData(Type const& _argumentType)
+{
+ solAssert(_argumentType.isImplicitlyConvertibleTo(*Type::fromElementaryTypeName("string memory")), "");
+ fetchFreeMemoryPointer();
+ m_context << (u256(FixedHash<4>::Arith(FixedHash<4>(dev::keccak256("Error(string)")))) << (256 - 32));
+ m_context << Instruction::DUP2 << Instruction::MSTORE;
+ m_context << u256(4) << Instruction::ADD;
+ // Stack: <string data> <mem pos of encoding start>
+ abiEncode({_argumentType.shared_from_this()}, {make_shared<ArrayType>(DataLocation::Memory, true)});
+ toSizeAfterFreeMemoryPointer();
+ m_context << Instruction::REVERT;
+ m_context.adjustStackOffset(_argumentType.sizeOnStack());
+}
+
unsigned CompilerUtils::loadFromMemory(
unsigned _offset,
Type const& _type,
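
Note (not part of the commit): revertWithStringData is what backs the revert("...") and require(..., "...") forms introduced in 0.4.22. The revert data starts with the 4-byte selector of Error(string) (0x08c379a0) followed by the ABI-encoded message. A short Solidity sketch; names are illustrative:

    pragma solidity ^0.4.22;

    contract Guarded {
        function check(uint amount) public pure {
            // On failure the call reverts with
            //   0x08c379a0 ++ abi.encode("amount too large")
            require(amount <= 100, "amount too large");
            if (amount == 13)
                revert("unlucky number");
        }
    }
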
@@ -139,7 +163,6 @@ void CompilerUtils::storeInMemoryDynamic(Type const& _type, bool _padToWordBound
dynamic_cast<FunctionType const&>(_type).kind() == FunctionType::Kind::External
)
{
- solUnimplementedAssert(_padToWordBoundaries, "Non-padded store for function not implemented.");
combineExternalFunctionType(true);
m_context << Instruction::DUP2 << Instruction::MSTORE;
m_context << u256(_padToWordBoundaries ? 32 : 24) << Instruction::ADD;
@@ -159,6 +182,163 @@ void CompilerUtils::storeInMemoryDynamic(Type const& _type, bool _padToWordBound
}
}
+void CompilerUtils::abiDecode(TypePointers const& _typeParameters, bool _fromMemory, bool _revertOnOutOfBounds)
+{
+ /// Stack: <source_offset> <length>
+ if (m_context.experimentalFeatureActive(ExperimentalFeature::ABIEncoderV2))
+ {
+ // Use the new JULIA-based decoding function
+ auto stackHeightBefore = m_context.stackHeight();
+ abiDecodeV2(_typeParameters, _fromMemory);
+ solAssert(m_context.stackHeight() - stackHeightBefore == sizeOnStack(_typeParameters) - 2, "");
+ return;
+ }
+
+ //@todo this does not yet support nested dynamic arrays
+
+ if (_revertOnOutOfBounds)
+ {
+ size_t encodedSize = 0;
+ for (auto const& t: _typeParameters)
+ encodedSize += t->decodingType()->calldataEncodedSize(true);
+ m_context.appendInlineAssembly("{ if lt(len, " + to_string(encodedSize) + ") { revert(0, 0) } }", {"len"});
+ }
+
+ m_context << Instruction::DUP2 << Instruction::ADD;
+ m_context << Instruction::SWAP1;
+ /// Stack: <input_end> <source_offset>
+
+ // Retain the offset pointer as base_offset, the point from which the data offsets are computed.
+ m_context << Instruction::DUP1;
+ for (TypePointer const& parameterType: _typeParameters)
+ {
+ // stack: v1 v2 ... v(k-1) input_end base_offset current_offset
+ TypePointer type = parameterType->decodingType();
+ solUnimplementedAssert(type, "No decoding type found.");
+ if (type->category() == Type::Category::Array)
+ {
+ auto const& arrayType = dynamic_cast<ArrayType const&>(*type);
+ solUnimplementedAssert(!arrayType.baseType()->isDynamicallyEncoded(), "Nested arrays not yet implemented.");
+ if (_fromMemory)
+ {
+ solUnimplementedAssert(
+ arrayType.baseType()->isValueType(),
+ "Nested memory arrays not yet implemented here."
+ );
+ // @todo If base type is an array or struct, it is still calldata-style encoded, so
+ // we would have to convert it like below.
+ solAssert(arrayType.location() == DataLocation::Memory, "");
+ if (arrayType.isDynamicallySized())
+ {
+ // compute data pointer
+ m_context << Instruction::DUP1 << Instruction::MLOAD;
+ if (_revertOnOutOfBounds)
+ {
+ // Check that the data pointer is valid and that length times
+ // item size is still inside the range.
+ Whiskers templ(R"({
+ if gt(ptr, 0x100000000) { revert(0, 0) }
+ ptr := add(ptr, base_offset)
+ let array_data_start := add(ptr, 0x20)
+ if gt(array_data_start, input_end) { revert(0, 0) }
+ let array_length := mload(ptr)
+ if or(
+ gt(array_length, 0x100000000),
+ gt(add(array_data_start, mul(array_length, <item_size>)), input_end)
+ ) { revert(0, 0) }
+ })");
+ templ("item_size", to_string(arrayType.isByteArray() ? 1 : arrayType.baseType()->calldataEncodedSize(true)));
+ m_context.appendInlineAssembly(templ.render(), {"input_end", "base_offset", "offset", "ptr"});
+ }
+ else
+ m_context << Instruction::DUP3 << Instruction::ADD;
+ // stack: v1 v2 ... v(k-1) input_end base_offset current_offset v(k)
+ moveIntoStack(3);
+ m_context << u256(0x20) << Instruction::ADD;
+ }
+ else
+ {
+ // Size has already been checked for this one.
+ moveIntoStack(2);
+ m_context << Instruction::DUP3;
+ m_context << u256(arrayType.calldataEncodedSize(true)) << Instruction::ADD;
+ }
+ }
+ else
+ {
+ // first load from calldata and potentially convert to memory if arrayType is memory
+ TypePointer calldataType = arrayType.copyForLocation(DataLocation::CallData, false);
+ if (calldataType->isDynamicallySized())
+ {
+ // put on stack: data_pointer length
+ loadFromMemoryDynamic(IntegerType(256), !_fromMemory);
+ m_context << Instruction::SWAP1;
+ // stack: input_end base_offset next_pointer data_offset
+ if (_revertOnOutOfBounds)
+ m_context.appendInlineAssembly("{ if gt(data_offset, 0x100000000) { revert(0, 0) } }", {"data_offset"});
+ m_context << Instruction::DUP3 << Instruction::ADD;
+ // stack: input_end base_offset next_pointer array_head_ptr
+ if (_revertOnOutOfBounds)
+ m_context.appendInlineAssembly(
+ "{ if gt(add(array_head_ptr, 0x20), input_end) { revert(0, 0) } }",
+ {"input_end", "base_offset", "next_ptr", "array_head_ptr"}
+ );
+ // retrieve length
+ loadFromMemoryDynamic(IntegerType(256), !_fromMemory, true);
+ // stack: input_end base_offset next_pointer array_length data_pointer
+ m_context << Instruction::SWAP2;
+ // stack: input_end base_offset data_pointer array_length next_pointer
+ if (_revertOnOutOfBounds)
+ {
+ unsigned itemSize = arrayType.isByteArray() ? 1 : arrayType.baseType()->calldataEncodedSize(true);
+ m_context.appendInlineAssembly(R"({
+ if or(
+ gt(array_length, 0x100000000),
+ gt(add(data_ptr, mul(array_length, )" + to_string(itemSize) + R"()), input_end)
+ ) { revert(0, 0) }
+ })", {"input_end", "base_offset", "data_ptr", "array_length", "next_ptr"});
+ }
+ }
+ else
+ {
+ // size has already been checked
+ // stack: input_end base_offset data_offset
+ m_context << Instruction::DUP1;
+ m_context << u256(calldataType->calldataEncodedSize()) << Instruction::ADD;
+ }
+ if (arrayType.location() == DataLocation::Memory)
+ {
+ // stack: input_end base_offset calldata_ref [length] next_calldata
+ // copy to memory
+ // move calldata type up again
+ moveIntoStack(calldataType->sizeOnStack());
+ convertType(*calldataType, arrayType, false, false, true);
+ // fetch next pointer again
+ moveToStackTop(arrayType.sizeOnStack());
+ }
+ // move input_end up
+ // stack: input_end base_offset calldata_ref [length] next_calldata
+ moveToStackTop(2 + arrayType.sizeOnStack());
+ m_context << Instruction::SWAP1;
+ // stack: base_offset calldata_ref [length] input_end next_calldata
+ moveToStackTop(2 + arrayType.sizeOnStack());
+ m_context << Instruction::SWAP1;
+ // stack: calldata_ref [length] input_end base_offset next_calldata
+ }
+ }
+ else
+ {
+ solAssert(!type->isDynamicallyEncoded(), "Unknown dynamically sized type: " + type->toString());
+ loadFromMemoryDynamic(*type, !_fromMemory, true);
+ // stack: v1 v2 ... v(k-1) input_end base_offset v(k) mem_offset
+ moveToStackTop(1, type->sizeOnStack());
+ moveIntoStack(3, type->sizeOnStack());
+ }
+ // stack: v1 v2 ... v(k-1) v(k) input_end base_offset next_offset
+ }
+ popStackSlots(3);
+}
+
void CompilerUtils::encodeToMemory(
TypePointers const& _givenTypes,
TypePointers const& _targetTypes,
@@ -321,15 +501,13 @@ void CompilerUtils::abiEncodeV2(
void CompilerUtils::abiDecodeV2(TypePointers const& _parameterTypes, bool _fromMemory)
{
- // stack: <source_offset>
+ // stack: <source_offset> <length> [stack top]
auto ret = m_context.pushNewTag();
+ moveIntoStack(2);
+ // stack: <return tag> <source_offset> <length> [stack top]
+ m_context << Instruction::DUP2 << Instruction::ADD;
m_context << Instruction::SWAP1;
- if (_fromMemory)
- // TODO pass correct size for the memory case
- m_context << (u256(1) << 63);
- else
- m_context << Instruction::CALLDATASIZE;
- m_context << Instruction::SWAP1;
+ // stack: <return tag> <end> <start>
string decoderName = m_context.abiFunctions().tupleDecoder(_parameterTypes, _fromMemory);
m_context.appendJumpTo(m_context.namedTag(decoderName));
m_context.adjustStackOffset(int(sizeOnStack(_parameterTypes)) - 3);
@@ -338,14 +516,34 @@ void CompilerUtils::abiDecodeV2(TypePointers const& _parameterTypes, bool _fromM
void CompilerUtils::zeroInitialiseMemoryArray(ArrayType const& _type)
{
- auto repeat = m_context.newTag();
- m_context << repeat;
- pushZeroValue(*_type.baseType());
- storeInMemoryDynamic(*_type.baseType());
- m_context << Instruction::SWAP1 << u256(1) << Instruction::SWAP1;
- m_context << Instruction::SUB << Instruction::SWAP1;
- m_context << Instruction::DUP2;
- m_context.appendConditionalJumpTo(repeat);
+ if (_type.baseType()->hasSimpleZeroValueInMemory())
+ {
+ solAssert(_type.baseType()->isValueType(), "");
+ Whiskers templ(R"({
+ let size := mul(length, <element_size>)
+ // cheap way of zero-initializing a memory range
+ codecopy(memptr, codesize(), size)
+ memptr := add(memptr, size)
+ })");
+ templ("element_size", to_string(_type.baseType()->memoryHeadSize()));
+ m_context.appendInlineAssembly(templ.render(), {"length", "memptr"});
+ }
+ else
+ {
+ // TODO: Potential optimization:
+ // When we create a new multi-dimensional dynamic array, each element
+ // is initialized to an empty array. It actually does not hurt
+ // to re-use exactly the same empty array for all elements. Currently,
+ // a new one is created each time.
+ auto repeat = m_context.newTag();
+ m_context << repeat;
+ pushZeroValue(*_type.baseType());
+ storeInMemoryDynamic(*_type.baseType());
+ m_context << Instruction::SWAP1 << u256(1) << Instruction::SWAP1;
+ m_context << Instruction::SUB << Instruction::SWAP1;
+ m_context << Instruction::DUP2;
+ m_context.appendConditionalJumpTo(repeat);
+ }
m_context << Instruction::SWAP1 << Instruction::POP;
}
@@ -427,7 +625,7 @@ void CompilerUtils::combineExternalFunctionType(bool _leftAligned)
leftShiftNumberOnStack(64);
}
-void CompilerUtils::pushCombinedFunctionEntryLabel(Declaration const& _function)
+void CompilerUtils::pushCombinedFunctionEntryLabel(Declaration const& _function, bool _runtimeOnly)
{
m_context << m_context.functionEntryLabel(_function).pushTag();
// If there is a runtime context, we have to merge both labels into the same
@@ -435,9 +633,10 @@ void CompilerUtils::pushCombinedFunctionEntryLabel(Declaration const& _function)
if (CompilerContext* rtc = m_context.runtimeContext())
{
leftShiftNumberOnStack(32);
- m_context <<
- rtc->functionEntryLabel(_function).toSubAssemblyTag(m_context.runtimeSub()) <<
- Instruction::OR;
+ if (_runtimeOnly)
+ m_context <<
+ rtc->functionEntryLabel(_function).toSubAssemblyTag(m_context.runtimeSub()) <<
+ Instruction::OR;
}
}
@@ -485,19 +684,17 @@ void CompilerUtils::convertType(
// clear for conversion to longer bytes
solAssert(targetTypeCategory == Type::Category::FixedBytes, "Invalid type conversion requested.");
FixedBytesType const& targetType = dynamic_cast<FixedBytesType const&>(_targetType);
- if (targetType.numBytes() > typeOnStack.numBytes() || _cleanupNeeded)
+ if (typeOnStack.numBytes() == 0 || targetType.numBytes() == 0)
+ m_context << Instruction::POP << u256(0);
+ else if (targetType.numBytes() > typeOnStack.numBytes() || _cleanupNeeded)
{
- if (typeOnStack.numBytes() == 0)
- m_context << Instruction::POP << u256(0);
- else
- {
- m_context << ((u256(1) << (256 - typeOnStack.numBytes() * 8)) - 1);
- m_context << Instruction::NOT << Instruction::AND;
- }
+ int bytes = min(typeOnStack.numBytes(), targetType.numBytes());
+ m_context << ((u256(1) << (256 - bytes * 8)) - 1);
+ m_context << Instruction::NOT << Instruction::AND;
}
}
- }
break;
+ }
case Type::Category::Enum:
solAssert(_targetType == _typeOnStack || targetTypeCategory == Type::Category::Integer, "");
if (enumOverflowCheckPending)
@@ -506,6 +703,7 @@ void CompilerUtils::convertType(
solAssert(enumType.numberOfMembers() > 0, "empty enum should have caused a parser error.");
m_context << u256(enumType.numberOfMembers() - 1) << Instruction::DUP2 << Instruction::GT;
if (_asPartOfArgumentDecoding)
+ // TODO: error message?
m_context.appendConditionalRevert();
else
m_context.appendConditionalInvalid();
@@ -598,8 +796,9 @@ void CompilerUtils::convertType(
bytesConstRef data(value);
if (targetTypeCategory == Type::Category::FixedBytes)
{
+ int const numBytes = dynamic_cast<FixedBytesType const&>(_targetType).numBytes();
solAssert(data.size() <= 32, "");
- m_context << h256::Arith(h256(data, h256::AlignLeft));
+ m_context << (h256::Arith(h256(data, h256::AlignLeft)) & (~(u256(-1) >> (8 * numBytes))));
}
else if (targetTypeCategory == Type::Category::Array)
{
@@ -873,6 +1072,13 @@ void CompilerUtils::pushZeroValue(Type const& _type)
return;
}
solAssert(referenceType->location() == DataLocation::Memory, "");
+ if (auto arrayType = dynamic_cast<ArrayType const*>(&_type))
+ if (arrayType->isDynamicallySized())
+ {
+ // Push a memory location that is (hopefully) always zero.
+ pushZeroPointer();
+ return;
+ }
TypePointer type = _type.shared_from_this();
m_context.callLowLevelFunction(
@@ -893,13 +1099,8 @@ void CompilerUtils::pushZeroValue(Type const& _type)
}
else if (auto arrayType = dynamic_cast<ArrayType const*>(type.get()))
{
- if (arrayType->isDynamicallySized())
- {
- // zero length
- _context << u256(0);
- utils.storeInMemoryDynamic(IntegerType(256));
- }
- else if (arrayType->length() > 0)
+ solAssert(!arrayType->isDynamicallySized(), "");
+ if (arrayType->length() > 0)
{
_context << arrayType->length() << Instruction::SWAP1;
// stack: items_to_do memory_pos
@@ -916,6 +1117,11 @@ void CompilerUtils::pushZeroValue(Type const& _type)
);
}
+void CompilerUtils::pushZeroPointer()
+{
+ m_context << u256(zeroPointer);
+}
+
void CompilerUtils::moveToStackVariable(VariableDeclaration const& _variable)
{
unsigned const stackPosition = m_context.baseToCurrentStackOffset(m_context.baseStackOffsetOfVariable(_variable));
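
Note (not part of the commit): with pushZeroPointer, the zero value of a dynamically-sized memory array is no longer a freshly allocated empty array but a pointer to the shared read-only zero slot (0x60). A short Solidity sketch; variable names are illustrative:

    pragma solidity ^0.4.22;

    contract Defaults {
        function lengths() public pure returns (uint a, uint b) {
            uint[] memory xs;   // default value: pointer to the shared zero slot
            bytes memory bs;    // likewise; its length word reads as 0
            a = xs.length;      // 0
            b = bs.length;      // 0
            // Such defaults must not be written through; allocate with
            // `new uint[](n)` before storing elements.
        }
    }
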
diff --git a/libsolidity/codegen/CompilerUtils.h b/libsolidity/codegen/CompilerUtils.h
index 3cde281b..476a7559 100644
--- a/libsolidity/codegen/CompilerUtils.h
+++ b/libsolidity/codegen/CompilerUtils.h
@@ -54,6 +54,13 @@ public:
/// Stack post: <size> <mem_start>
void toSizeAfterFreeMemoryPointer();
+ /// Appends code that performs a revert, providing the given string data.
+ /// Will also append an error signature corresponding to Error(string).
+ /// @param _argumentType the type of the string argument, will be converted to memory string.
+ /// Stack pre: string data
+ /// Stack post:
+ void revertWithStringData(Type const& _argumentType);
+
/// Loads data from memory to the stack.
/// @param _offset offset in memory (or calldata)
/// @param _type data type to load
@@ -88,6 +95,15 @@ public:
/// Stack post: (memory_offset+length)
void storeInMemoryDynamic(Type const& _type, bool _padToWords = true);
+ /// Creates code that unpacks the arguments according to their types specified by a vector of TypePointers.
+ /// From memory if @a _fromMemory is true, otherwise from call data.
+ /// Calls revert if @a _revertOnOutOfBounds is true and the supplied size is shorter
+ /// than the static data requirements or if dynamic data pointers reach outside of the
+ /// area. Also has a hard cap of 0x100000000 for any given length/offset field.
+ /// Stack pre: <source_offset> <length>
+ /// Stack post: <value0> <value1> ... <valuen>
+ void abiDecode(TypePointers const& _typeParameters, bool _fromMemory = false, bool _revertOnOutOfBounds = false);
+
/// Copies values (of types @a _givenTypes) given on the stack to a location in memory given
/// at the stack top, encoding them according to the ABI as the given types @a _targetTypes.
/// Removes the values from the stack and leaves the updated memory pointer.
@@ -149,7 +165,7 @@ public:
/// Decodes data from ABI encoding into internal encoding. If @a _fromMemory is set to true,
/// the data is taken from memory instead of from calldata.
/// Can allocate memory.
- /// Stack pre: <source_offset>
+ /// Stack pre: <source_offset> <length>
/// Stack post: <value0> <value1> ... <valuen>
void abiDecodeV2(TypePointers const& _parameterTypes, bool _fromMemory = false);
@@ -179,7 +195,8 @@ public:
/// Appends code that combines the construction-time (if available) and runtime function
/// entry label of the given function into a single stack slot.
/// Note: This might cause the compilation queue of the runtime context to be extended.
- void pushCombinedFunctionEntryLabel(Declaration const& _function);
+ /// If @a _runtimeOnly, the entry label will include the runtime assembly tag.
+ void pushCombinedFunctionEntryLabel(Declaration const& _function, bool _runtimeOnly = true);
/// Appends code for an implicit or explicit type conversion. This includes erasing higher
/// order bits (@see appendHighBitCleanup) when widening integer but also copy to memory
@@ -200,6 +217,9 @@ public:
/// Creates a zero-value for the given type and puts it onto the stack. This might allocate
/// memory for memory references.
void pushZeroValue(Type const& _type);
+ /// Pushes a pointer to the stack that points to a (potentially shared) location in memory
+ /// that always contains a zero. It is not allowed to write there.
+ void pushZeroPointer();
/// Moves the value that is at the top of the stack to a stack variable.
void moveToStackVariable(VariableDeclaration const& _variable);
@@ -245,6 +265,10 @@ public:
/// Position of the free-memory-pointer in memory;
static const size_t freeMemoryPointer;
+ /// Position of the memory slot that is always zero.
+ static const size_t zeroPointer;
+ /// Starting offset for memory available to the user (aka the contract).
+ static const size_t generalPurposeMemoryStart;
private:
/// Address of the precompiled identity contract.
diff --git a/libsolidity/codegen/ContractCompiler.cpp b/libsolidity/codegen/ContractCompiler.cpp
index 5a9498f0..0889ac7c 100644
--- a/libsolidity/codegen/ContractCompiler.cpp
+++ b/libsolidity/codegen/ContractCompiler.cpp
@@ -128,6 +128,7 @@ void ContractCompiler::appendCallValueCheck()
{
// Throw if function is not payable but call contained ether.
m_context << Instruction::CALLVALUE;
+ // TODO: error message?
m_context.appendConditionalRevert();
}
@@ -135,33 +136,13 @@ void ContractCompiler::appendInitAndConstructorCode(ContractDefinition const& _c
{
solAssert(!_contract.isLibrary(), "Tried to initialize library.");
CompilerContext::LocationSetter locationSetter(m_context, _contract);
- // Determine the arguments that are used for the base constructors.
- std::vector<ContractDefinition const*> const& bases = _contract.annotation().linearizedBaseContracts;
- for (ContractDefinition const* contract: bases)
- {
- if (FunctionDefinition const* constructor = contract->constructor())
- for (auto const& modifier: constructor->modifiers())
- {
- auto baseContract = dynamic_cast<ContractDefinition const*>(
- modifier->name()->annotation().referencedDeclaration);
- if (baseContract)
- if (m_baseArguments.count(baseContract->constructor()) == 0)
- m_baseArguments[baseContract->constructor()] = &modifier->arguments();
- }
- for (ASTPointer<InheritanceSpecifier> const& base: contract->baseContracts())
- {
- ContractDefinition const* baseContract = dynamic_cast<ContractDefinition const*>(
- base->name().annotation().referencedDeclaration
- );
- solAssert(baseContract, "");
+ m_baseArguments = &_contract.annotation().baseConstructorArguments;
- if (m_baseArguments.count(baseContract->constructor()) == 0)
- m_baseArguments[baseContract->constructor()] = &base->arguments();
- }
- }
// Initialization of state variables in base-to-derived order.
- for (ContractDefinition const* contract: boost::adaptors::reverse(bases))
+ for (ContractDefinition const* contract: boost::adaptors::reverse(
+ _contract.annotation().linearizedBaseContracts
+ ))
initializeStateVariables(*contract);
if (FunctionDefinition const* constructor = _contract.constructor())
@@ -235,9 +216,16 @@ void ContractCompiler::appendBaseConstructor(FunctionDefinition const& _construc
FunctionType constructorType(_constructor);
if (!constructorType.parameterTypes().empty())
{
- solAssert(m_baseArguments.count(&_constructor), "");
- std::vector<ASTPointer<Expression>> const* arguments = m_baseArguments[&_constructor];
+ solAssert(m_baseArguments, "");
+ solAssert(m_baseArguments->count(&_constructor), "");
+ std::vector<ASTPointer<Expression>> const* arguments = nullptr;
+ ASTNode const* baseArgumentNode = m_baseArguments->at(&_constructor);
+ if (auto inheritanceSpecifier = dynamic_cast<InheritanceSpecifier const*>(baseArgumentNode))
+ arguments = inheritanceSpecifier->arguments();
+ else if (auto modifierInvocation = dynamic_cast<ModifierInvocation const*>(baseArgumentNode))
+ arguments = modifierInvocation->arguments();
solAssert(arguments, "");
+ solAssert(arguments->size() == constructorType.parameterTypes().size(), "");
for (unsigned i = 0; i < arguments->size(); ++i)
compileExpression(*(arguments->at(i)), constructorType.parameterTypes()[i]);
}
@@ -278,9 +266,10 @@ void ContractCompiler::appendConstructor(FunctionDefinition const& _constructor)
m_context.appendProgramSize();
m_context << Instruction::DUP4 << Instruction::CODECOPY;
m_context << Instruction::DUP2 << Instruction::ADD;
+ m_context << Instruction::DUP1;
CompilerUtils(m_context).storeFreeMemoryPointer();
// stack: <memptr>
- appendCalldataUnpacker(FunctionType(_constructor).parameterTypes(), true);
+ CompilerUtils(m_context).abiDecode(FunctionType(_constructor).parameterTypes(), true);
}
_constructor.accept(*this);
}
@@ -339,6 +328,7 @@ void ContractCompiler::appendFunctionSelector(ContractDefinition const& _contrac
m_context << Instruction::STOP;
}
else
+ // TODO: error message here?
m_context.appendRevert();
for (auto const& it: interfaceFunctions)
@@ -367,7 +357,8 @@ void ContractCompiler::appendFunctionSelector(ContractDefinition const& _contrac
{
// Parameter for calldataUnpacker
m_context << CompilerUtils::dataStartOffset;
- appendCalldataUnpacker(functionType->parameterTypes());
+ m_context << Instruction::DUP1 << Instruction::CALLDATASIZE << Instruction::SUB;
+ CompilerUtils(m_context).abiDecode(functionType->parameterTypes());
}
m_context.appendJumpTo(m_context.functionEntryLabel(functionType->declaration()));
m_context << returnTag;
@@ -382,105 +373,6 @@ void ContractCompiler::appendFunctionSelector(ContractDefinition const& _contrac
}
}
-void ContractCompiler::appendCalldataUnpacker(TypePointers const& _typeParameters, bool _fromMemory)
-{
- // We do not check the calldata size, everything is zero-padded
-
- if (m_context.experimentalFeatureActive(ExperimentalFeature::ABIEncoderV2))
- {
- // Use the new JULIA-based decoding function
- auto stackHeightBefore = m_context.stackHeight();
- CompilerUtils(m_context).abiDecodeV2(_typeParameters, _fromMemory);
- solAssert(m_context.stackHeight() - stackHeightBefore == CompilerUtils(m_context).sizeOnStack(_typeParameters) - 1, "");
- return;
- }
-
- //@todo this does not yet support nested dynamic arrays
-
- // Retain the offset pointer as base_offset, the point from which the data offsets are computed.
- m_context << Instruction::DUP1;
- for (TypePointer const& parameterType: _typeParameters)
- {
- // stack: v1 v2 ... v(k-1) base_offset current_offset
- TypePointer type = parameterType->decodingType();
- solUnimplementedAssert(type, "No decoding type found.");
- if (type->category() == Type::Category::Array)
- {
- auto const& arrayType = dynamic_cast<ArrayType const&>(*type);
- solUnimplementedAssert(!arrayType.baseType()->isDynamicallySized(), "Nested arrays not yet implemented.");
- if (_fromMemory)
- {
- solUnimplementedAssert(
- arrayType.baseType()->isValueType(),
- "Nested memory arrays not yet implemented here."
- );
- // @todo If base type is an array or struct, it is still calldata-style encoded, so
- // we would have to convert it like below.
- solAssert(arrayType.location() == DataLocation::Memory, "");
- if (arrayType.isDynamicallySized())
- {
- // compute data pointer
- m_context << Instruction::DUP1 << Instruction::MLOAD;
- m_context << Instruction::DUP3 << Instruction::ADD;
- m_context << Instruction::SWAP2 << Instruction::SWAP1;
- m_context << u256(0x20) << Instruction::ADD;
- }
- else
- {
- m_context << Instruction::SWAP1 << Instruction::DUP2;
- m_context << u256(arrayType.calldataEncodedSize(true)) << Instruction::ADD;
- }
- }
- else
- {
- // first load from calldata and potentially convert to memory if arrayType is memory
- TypePointer calldataType = arrayType.copyForLocation(DataLocation::CallData, false);
- if (calldataType->isDynamicallySized())
- {
- // put on stack: data_pointer length
- CompilerUtils(m_context).loadFromMemoryDynamic(IntegerType(256), !_fromMemory);
- // stack: base_offset data_offset next_pointer
- m_context << Instruction::SWAP1 << Instruction::DUP3 << Instruction::ADD;
- // stack: base_offset next_pointer data_pointer
- // retrieve length
- CompilerUtils(m_context).loadFromMemoryDynamic(IntegerType(256), !_fromMemory, true);
- // stack: base_offset next_pointer length data_pointer
- m_context << Instruction::SWAP2;
- // stack: base_offset data_pointer length next_pointer
- }
- else
- {
- // leave the pointer on the stack
- m_context << Instruction::DUP1;
- m_context << u256(calldataType->calldataEncodedSize()) << Instruction::ADD;
- }
- if (arrayType.location() == DataLocation::Memory)
- {
- // stack: base_offset calldata_ref [length] next_calldata
- // copy to memory
- // move calldata type up again
- CompilerUtils(m_context).moveIntoStack(calldataType->sizeOnStack());
- CompilerUtils(m_context).convertType(*calldataType, arrayType, false, false, true);
- // fetch next pointer again
- CompilerUtils(m_context).moveToStackTop(arrayType.sizeOnStack());
- }
- // move base_offset up
- CompilerUtils(m_context).moveToStackTop(1 + arrayType.sizeOnStack());
- m_context << Instruction::SWAP1;
- }
- }
- else
- {
- solAssert(!type->isDynamicallySized(), "Unknown dynamically sized type: " + type->toString());
- CompilerUtils(m_context).loadFromMemoryDynamic(*type, !_fromMemory, true);
- CompilerUtils(m_context).moveToStackTop(1 + type->sizeOnStack());
- m_context << Instruction::SWAP1;
- }
- // stack: v1 v2 ... v(k-1) v(k) base_offset mem_offset
- }
- m_context << Instruction::POP << Instruction::POP;
-}
-
void ContractCompiler::appendReturnValuePacker(TypePointers const& _typeParameters, bool _isLibrary)
{
CompilerUtils utils(m_context);
@@ -1002,15 +894,21 @@ void ContractCompiler::appendModifierOrFunctionCode()
appendModifierOrFunctionCode();
else
{
- ModifierDefinition const& modifier = m_context.functionModifier(modifierInvocation->name()->name());
+ ModifierDefinition const& nonVirtualModifier = dynamic_cast<ModifierDefinition const&>(
+ *modifierInvocation->name()->annotation().referencedDeclaration
+ );
+ ModifierDefinition const& modifier = m_context.resolveVirtualFunctionModifier(nonVirtualModifier);
CompilerContext::LocationSetter locationSetter(m_context, modifier);
- solAssert(modifier.parameters().size() == modifierInvocation->arguments().size(), "");
+ std::vector<ASTPointer<Expression>> const& modifierArguments =
+ modifierInvocation->arguments() ? *modifierInvocation->arguments() : std::vector<ASTPointer<Expression>>();
+
+ solAssert(modifier.parameters().size() == modifierArguments.size(), "");
for (unsigned i = 0; i < modifier.parameters().size(); ++i)
{
m_context.addVariable(*modifier.parameters()[i]);
addedVariables.push_back(modifier.parameters()[i].get());
compileExpression(
- *modifierInvocation->arguments()[i],
+ *modifierArguments[i],
modifier.parameters()[i]->annotation().type
);
}
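
Note (not part of the commit): resolveVirtualFunctionModifier skips the inheritance-hierarchy lookup for modifiers defined in libraries, because library functions may be inlined into a caller whose hierarchy is unrelated. A hedged Solidity sketch of such a modifier; names are made up:

    pragma solidity ^0.4.22;

    library SafeOps {
        modifier nonZero(uint x) {
            require(x != 0, "zero not allowed");
            _;
        }

        // May be inlined into the calling contract, so the modifier is resolved
        // on the library itself rather than via the caller's inheritance chain.
        function div(uint a, uint b) internal pure nonZero(b) returns (uint) {
            return a / b;
        }
    }

    contract UsesLib {
        function f(uint a, uint b) public pure returns (uint) {
            return SafeOps.div(a, b);
        }
    }
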
diff --git a/libsolidity/codegen/ContractCompiler.h b/libsolidity/codegen/ContractCompiler.h
index 8559ea58..02a3452f 100644
--- a/libsolidity/codegen/ContractCompiler.h
+++ b/libsolidity/codegen/ContractCompiler.h
@@ -90,10 +90,6 @@ private:
void appendDelegatecallCheck();
void appendFunctionSelector(ContractDefinition const& _contract);
void appendCallValueCheck();
- /// Creates code that unpacks the arguments for the given function represented by a vector of TypePointers.
- /// From memory if @a _fromMemory is true, otherwise from call data.
- /// Expects source offset on the stack, which is removed.
- void appendCalldataUnpacker(TypePointers const& _typeParameters, bool _fromMemory = false);
void appendReturnValuePacker(TypePointers const& _typeParameters, bool _isLibrary);
void registerStateVariables(ContractDefinition const& _contract);
@@ -139,7 +135,7 @@ private:
FunctionDefinition const* m_currentFunction = nullptr;
unsigned m_stackCleanupForReturn = 0; ///< this number of stack elements need to be removed before jump to m_returnTag
// arguments for base constructors, filled in derived-to-base order
- std::map<FunctionDefinition const*, std::vector<ASTPointer<Expression>> const*> m_baseArguments;
+ std::map<FunctionDefinition const*, ASTNode const*> const* m_baseArguments;
};
}
diff --git a/libsolidity/codegen/ExpressionCompiler.cpp b/libsolidity/codegen/ExpressionCompiler.cpp
index 7162cb0d..3cf46a9d 100644
--- a/libsolidity/codegen/ExpressionCompiler.cpp
+++ b/libsolidity/codegen/ExpressionCompiler.cpp
@@ -33,6 +33,8 @@
#include <libsolidity/codegen/LValue.h>
#include <libevmasm/GasMeter.h>
+#include <libdevcore/Whiskers.h>
+
using namespace std;
namespace dev
@@ -139,8 +141,8 @@ void ExpressionCompiler::appendStateVariableAccessor(VariableDeclaration const&
utils().popStackSlots(paramTypes.size() - 1);
}
unsigned retSizeOnStack = 0;
- solAssert(accessorType.returnParameterTypes().size() >= 1, "");
- auto const& returnTypes = accessorType.returnParameterTypes();
+ auto returnTypes = accessorType.returnParameterTypes();
+ solAssert(returnTypes.size() >= 1, "");
if (StructType const* structType = dynamic_cast<StructType const*>(returnType.get()))
{
// remove offset
@@ -518,7 +520,23 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
arguments[i]->accept(*this);
utils().convertType(*arguments[i]->annotation().type, *function.parameterTypes()[i]);
}
- _functionCall.expression().accept(*this);
+
+ {
+ bool shortcutTaken = false;
+ if (auto identifier = dynamic_cast<Identifier const*>(&_functionCall.expression()))
+ if (auto functionDef = dynamic_cast<FunctionDefinition const*>(identifier->annotation().referencedDeclaration))
+ {
+ // Do not directly visit the identifier, because this way, we can avoid
+ // the runtime entry label to be created at the creation time context.
+ CompilerContext::LocationSetter locationSetter2(m_context, *identifier);
+ utils().pushCombinedFunctionEntryLabel(m_context.resolveVirtualFunction(*functionDef), false);
+ shortcutTaken = true;
+ }
+
+ if (!shortcutTaken)
+ _functionCall.expression().accept(*this);
+ }
+
unsigned parameterSize = CompilerUtils::sizeOnStack(function.parameterTypes());
if (function.bound())
{
@@ -592,7 +610,8 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
m_context << Instruction::CREATE;
// Check if zero (out of stack or not enough balance).
m_context << Instruction::DUP1 << Instruction::ISZERO;
- m_context.appendConditionalRevert();
+ // TODO: Can we bubble up here? There might be different reasons for failure, I think.
+ m_context.appendConditionalRevert(true);
if (function.valueSet())
m_context << swapInstruction(1) << Instruction::POP;
break;
@@ -654,8 +673,9 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
if (function.kind() == FunctionType::Kind::Transfer)
{
// Check if zero (out of stack or not enough balance).
+ // TODO: bubble up here, but might also be different error.
m_context << Instruction::ISZERO;
- m_context.appendConditionalRevert();
+ m_context.appendConditionalRevert(true);
}
break;
case FunctionType::Kind::Selfdestruct:
@@ -664,8 +684,19 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
m_context << Instruction::SELFDESTRUCT;
break;
case FunctionType::Kind::Revert:
- m_context.appendRevert();
+ {
+ if (!arguments.empty())
+ {
+ // function-sel(Error(string)) + encoding
+ solAssert(arguments.size() == 1, "");
+ solAssert(function.parameterTypes().size() == 1, "");
+ arguments.front()->accept(*this);
+ utils().revertWithStringData(*arguments.front()->annotation().type);
+ }
+ else
+ m_context.appendRevert();
break;
+ }
case FunctionType::Kind::SHA3:
{
TypePointers argumentTypes;
@@ -805,24 +836,27 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
function.kind() == FunctionType::Kind::ArrayPush ?
make_shared<ArrayType>(DataLocation::Storage, paramType) :
make_shared<ArrayType>(DataLocation::Storage);
- // get the current length
- ArrayUtils(m_context).retrieveLength(*arrayType);
- m_context << Instruction::DUP1;
- // stack: ArrayReference currentLength currentLength
- m_context << u256(1) << Instruction::ADD;
- // stack: ArrayReference currentLength newLength
- m_context << Instruction::DUP3 << Instruction::DUP2;
- ArrayUtils(m_context).resizeDynamicArray(*arrayType);
- m_context << Instruction::SWAP2 << Instruction::SWAP1;
- // stack: newLength ArrayReference oldLength
- ArrayUtils(m_context).accessIndex(*arrayType, false);
- // stack: newLength storageSlot slotOffset
+ // stack: ArrayReference
arguments[0]->accept(*this);
+ TypePointer const& argType = arguments[0]->annotation().type;
+ // stack: ArrayReference argValue
+ utils().moveToStackTop(argType->sizeOnStack(), 1);
+ // stack: argValue ArrayReference
+ m_context << Instruction::DUP1;
+ ArrayUtils(m_context).incrementDynamicArraySize(*arrayType);
+ // stack: argValue ArrayReference newLength
+ m_context << Instruction::SWAP1;
+ // stack: argValue newLength ArrayReference
+ m_context << u256(1) << Instruction::DUP3 << Instruction::SUB;
+ // stack: argValue newLength ArrayReference (newLength-1)
+ ArrayUtils(m_context).accessIndex(*arrayType, false);
+ // stack: argValue newLength storageSlot slotOffset
+ utils().moveToStackTop(3, argType->sizeOnStack());
// stack: newLength storageSlot slotOffset argValue
TypePointer type = arguments[0]->annotation().type->closestTemporaryType(arrayType->baseType());
solAssert(type, "");
- utils().convertType(*arguments[0]->annotation().type, *type);
+ utils().convertType(*argType, *type);
utils().moveToStackTop(1 + type->sizeOnStack());
utils().moveToStackTop(1 + type->sizeOnStack());
// stack: newLength argValue storageSlot slotOffset
@@ -834,8 +868,6 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
}
case FunctionType::Kind::ObjectCreation:
{
- // Will allocate at the end of memory (MSIZE) and not write at all unless the base
- // type is dynamically sized.
ArrayType const& arrayType = dynamic_cast<ArrayType const&>(*_functionCall.annotation().type);
_functionCall.expression().accept(*this);
solAssert(arguments.size() == 1, "");
@@ -845,15 +877,7 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
utils().convertType(*arguments[0]->annotation().type, IntegerType(256));
// Stack: requested_length
- // Allocate at max(MSIZE, freeMemoryPointer)
utils().fetchFreeMemoryPointer();
- m_context << Instruction::DUP1 << Instruction::MSIZE;
- m_context << Instruction::LT;
- auto initialise = m_context.appendConditionalJump();
- // Free memory pointer does not point to empty memory, use MSIZE.
- m_context << Instruction::POP;
- m_context << Instruction::MSIZE;
- m_context << initialise;
// Stack: requested_length memptr
m_context << Instruction::SWAP1;
@@ -878,13 +902,10 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
// Check if length is zero
m_context << Instruction::DUP1 << Instruction::ISZERO;
auto skipInit = m_context.appendConditionalJump();
-
- // We only have to initialise if the base type is a not a value type.
- if (dynamic_cast<ReferenceType const*>(arrayType.baseType().get()))
- {
- m_context << Instruction::DUP2 << u256(32) << Instruction::ADD;
- utils().zeroInitialiseMemoryArray(arrayType);
- }
+ // Always initialize because the free memory pointer might point at
+ // a dirty memory area.
+ m_context << Instruction::DUP2 << u256(32) << Instruction::ADD;
+ utils().zeroInitialiseMemoryArray(arrayType);
m_context << skipInit;
m_context << Instruction::POP;
break;
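
Note (not part of the commit): `new` memory arrays are now always allocated at the free memory pointer (the MSIZE fallback is gone) and their element area is always zero-initialised, even for value types, because the free memory pointer may point at dirty memory. Illustration:

    pragma solidity ^0.4.22;

    contract NewArrays {
        function make(uint len) public pure returns (uint first) {
            uint[] memory xs = new uint[](len);
            if (len > 0)
                first = xs[0];   // guaranteed to be 0
        }
    }
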
@@ -894,16 +915,130 @@ bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
{
arguments.front()->accept(*this);
utils().convertType(*arguments.front()->annotation().type, *function.parameterTypes().front(), false);
+ if (arguments.size() > 1)
+ {
+ // Users probably expect the second argument to be evaluated
+ // even if the condition is false, as would be the case for an actual
+ // function call.
+ solAssert(arguments.size() == 2, "");
+ solAssert(function.kind() == FunctionType::Kind::Require, "");
+ arguments.at(1)->accept(*this);
+ utils().moveIntoStack(1, arguments.at(1)->annotation().type->sizeOnStack());
+ }
+ // Stack: <error string (unconverted)> <condition>
// jump if condition was met
m_context << Instruction::ISZERO << Instruction::ISZERO;
auto success = m_context.appendConditionalJump();
if (function.kind() == FunctionType::Kind::Assert)
// condition was not met, flag an error
m_context.appendInvalid();
+ else if (arguments.size() > 1)
+ utils().revertWithStringData(*arguments.at(1)->annotation().type);
else
m_context.appendRevert();
// the success branch
m_context << success;
+ if (arguments.size() > 1)
+ utils().popStackElement(*arguments.at(1)->annotation().type);
+ break;
+ }
+ case FunctionType::Kind::ABIEncode:
+ case FunctionType::Kind::ABIEncodePacked:
+ case FunctionType::Kind::ABIEncodeWithSelector:
+ case FunctionType::Kind::ABIEncodeWithSignature:
+ {
+ bool const isPacked = function.kind() == FunctionType::Kind::ABIEncodePacked;
+ bool const hasSelectorOrSignature =
+ function.kind() == FunctionType::Kind::ABIEncodeWithSelector ||
+ function.kind() == FunctionType::Kind::ABIEncodeWithSignature;
+
+ TypePointers argumentTypes;
+ TypePointers targetTypes;
+ for (unsigned i = 0; i < arguments.size(); ++i)
+ {
+ arguments[i]->accept(*this);
+ // Do not keep the selector as part of the ABI encoded args
+ if (!hasSelectorOrSignature || i > 0)
+ argumentTypes.push_back(arguments[i]->annotation().type);
+ }
+ utils().fetchFreeMemoryPointer();
+ // stack now: [<selector>] <arg1> .. <argN> <free_mem>
+
+ // adjust by 32(+4) bytes to accommodate the length(+selector)
+ m_context << u256(32 + (hasSelectorOrSignature ? 4 : 0)) << Instruction::ADD;
+ // stack now: [<selector>] <arg1> .. <argN> <data_encoding_area_start>
+
+ if (isPacked)
+ {
+ solAssert(!function.padArguments(), "");
+ utils().packedEncode(argumentTypes, TypePointers());
+ }
+ else
+ {
+ solAssert(function.padArguments(), "");
+ utils().abiEncode(argumentTypes, TypePointers());
+ }
+ utils().fetchFreeMemoryPointer();
+ // stack: [<selector>] <data_encoding_area_end> <bytes_memory_ptr>
+
+ // size is end minus start minus length slot
+ m_context.appendInlineAssembly(R"({
+ mstore(mem_ptr, sub(sub(mem_end, mem_ptr), 0x20))
+ })", {"mem_end", "mem_ptr"});
+ m_context << Instruction::SWAP1;
+ utils().storeFreeMemoryPointer();
+ // stack: [<selector>] <memory ptr>
+
+ if (hasSelectorOrSignature)
+ {
+ // stack: <selector> <memory pointer>
+ solAssert(arguments.size() >= 1, "");
+ TypePointer const& selectorType = arguments[0]->annotation().type;
+ utils().moveIntoStack(selectorType->sizeOnStack());
+ TypePointer dataOnStack = selectorType;
+ // stack: <memory pointer> <selector>
+ if (function.kind() == FunctionType::Kind::ABIEncodeWithSignature)
+ {
+ // hash the signature
+ if (auto const* stringType = dynamic_cast<StringLiteralType const*>(selectorType.get()))
+ {
+ FixedHash<4> hash(dev::keccak256(stringType->value()));
+ m_context << (u256(FixedHash<4>::Arith(hash)) << (256 - 32));
+ dataOnStack = make_shared<FixedBytesType>(4);
+ }
+ else
+ {
+ utils().fetchFreeMemoryPointer();
+ // stack: <memory pointer> <selector> <free mem ptr>
+ utils().packedEncode(TypePointers{selectorType}, TypePointers());
+ utils().toSizeAfterFreeMemoryPointer();
+ m_context << Instruction::KECCAK256;
+ // stack: <memory pointer> <hash>
+
+ dataOnStack = make_shared<FixedBytesType>(32);
+ }
+ }
+ else
+ {
+ solAssert(function.kind() == FunctionType::Kind::ABIEncodeWithSelector, "");
+ }
+
+ utils().convertType(*dataOnStack, FixedBytesType(4), true);
+
+ // stack: <memory pointer> <selector>
+
+ // load current memory, mask and combine the selector
+ string mask = formatNumber((u256(-1) >> 32));
+ m_context.appendInlineAssembly(R"({
+ let data_start := add(mem_ptr, 0x20)
+ let data := mload(data_start)
+ let mask := )" + mask + R"(
+ mstore(data_start, or(and(data, mask), selector))
+ })", {"mem_ptr", "selector"});
+ m_context << Instruction::POP;
+ }
+
+ // stack now: <memory pointer>
break;
}
case FunctionType::Kind::GasLeft:
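
Note (not part of the commit): the ABIEncode* cases implement the abi.encode, abi.encodePacked, abi.encodeWithSelector and abi.encodeWithSignature builtins added in 0.4.22. A short Solidity sketch; the transfer(address,uint256) signature and its selector 0xa9059cbb are only an example:

    pragma solidity ^0.4.22;

    contract Encoding {
        function build(uint amount, address to) public pure returns (uint plainLen, uint callLen) {
            bytes memory args  = abi.encode(to, amount);   // two words, padded
            // encodeWithSignature hashes the signature string to a 4-byte selector,
            // exactly what the ABIEncodeWithSignature branch above does.
            bytes memory call1 = abi.encodeWithSignature("transfer(address,uint256)", to, amount);
            bytes memory call2 = abi.encodeWithSelector(bytes4(0xa9059cbb), to, amount);
            plainLen = args.length;    // 64
            callLen = call1.length;    // 68 = 4 + 64
            require(call2.length == callLen);
        }
    }
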
@@ -1147,6 +1282,9 @@ bool ExpressionCompiler::visit(MemberAccess const& _memberAccess)
else if (member == "sig")
m_context << u256(0) << Instruction::CALLDATALOAD
<< (u256(0xffffffff) << (256 - 32)) << Instruction::AND;
+ else if (member == "blockhash")
+ {
+ }
else
solAssert(false, "Unknown magic member.");
break;
@@ -1356,6 +1494,10 @@ void ExpressionCompiler::endVisit(Identifier const& _identifier)
}
}
else if (FunctionDefinition const* functionDef = dynamic_cast<FunctionDefinition const*>(declaration))
+ // If the identifier is called right away, this code is executed in visit(FunctionCall...), because
+ // we want to avoid having a reference to the runtime function entry point in the
+ // constructor context, since this would force the compiler to include unreferenced
+ // internal functions in the runtime context.
utils().pushCombinedFunctionEntryLabel(m_context.resolveVirtualFunction(*functionDef));
else if (auto variable = dynamic_cast<VariableDeclaration const*>(declaration))
appendVariable(*variable, static_cast<Expression const&>(_identifier));
@@ -1615,15 +1757,27 @@ void ExpressionCompiler::appendExternalFunctionCall(
m_context.experimentalFeatureActive(ExperimentalFeature::V050) &&
m_context.evmVersion().hasStaticCall();
+ bool haveReturndatacopy = m_context.evmVersion().supportsReturndata();
unsigned retSize = 0;
+ TypePointers returnTypes;
if (returnSuccessCondition)
retSize = 0; // return value actually is success condition
+ else if (haveReturndatacopy)
+ returnTypes = _functionType.returnParameterTypes();
else
- for (auto const& retType: _functionType.returnParameterTypes())
+ returnTypes = _functionType.returnParameterTypesWithoutDynamicTypes();
+
+ bool dynamicReturnSize = false;
+ for (auto const& retType: returnTypes)
+ if (retType->isDynamicallyEncoded())
{
- solAssert(!retType->isDynamicallySized(), "Unable to return dynamic type from external call.");
- retSize += retType->calldataEncodedSize();
+ solAssert(haveReturndatacopy, "");
+ dynamicReturnSize = true;
+ retSize = 0;
+ break;
}
+ else
+ retSize += retType->calldataEncodedSize();
// Evaluate arguments.
TypePointers argumentTypes;
@@ -1755,6 +1909,7 @@ void ExpressionCompiler::appendExternalFunctionCall(
if (funKind == FunctionType::Kind::External || funKind == FunctionType::Kind::CallCode || funKind == FunctionType::Kind::DelegateCall)
{
m_context << Instruction::DUP1 << Instruction::EXTCODESIZE << Instruction::ISZERO;
+ // TODO: error message?
m_context.appendConditionalRevert();
existenceChecked = true;
}
@@ -1797,7 +1952,7 @@ void ExpressionCompiler::appendExternalFunctionCall(
{
//Propagate error condition (if CALL pushes 0 on stack).
m_context << Instruction::ISZERO;
- m_context.appendConditionalRevert();
+ m_context.appendConditionalRevert(true);
}
utils().popStackSlots(remainsSize);
@@ -1821,20 +1976,42 @@ void ExpressionCompiler::appendExternalFunctionCall(
utils().fetchFreeMemoryPointer();
m_context << Instruction::SUB << Instruction::MLOAD;
}
- else if (!_functionType.returnParameterTypes().empty())
+ else if (!returnTypes.empty())
{
utils().fetchFreeMemoryPointer();
- bool memoryNeeded = false;
- for (auto const& retType: _functionType.returnParameterTypes())
+ // Stack: return_data_start
+
+ // The old decoder did not allocate any memory (i.e. did not touch the free
+ // memory pointer), but kept references to the return data for
+ // (statically-sized) arrays
+ bool needToUpdateFreeMemoryPtr = false;
+ if (dynamicReturnSize || m_context.experimentalFeatureActive(ExperimentalFeature::ABIEncoderV2))
+ needToUpdateFreeMemoryPtr = true;
+ else
+ for (auto const& retType: returnTypes)
+ if (dynamic_cast<ReferenceType const*>(retType.get()))
+ needToUpdateFreeMemoryPtr = true;
+
+ // Stack: return_data_start
+ if (dynamicReturnSize)
{
- utils().loadFromMemoryDynamic(*retType, false, true, true);
- if (dynamic_cast<ReferenceType const*>(retType.get()))
- memoryNeeded = true;
+ solAssert(haveReturndatacopy, "");
+ m_context.appendInlineAssembly("{ returndatacopy(return_data_start, 0, returndatasize()) }", {"return_data_start"});
}
- if (memoryNeeded)
- utils().storeFreeMemoryPointer();
else
- m_context << Instruction::POP;
+ solAssert(retSize > 0, "");
+ // Always use the actual return length, and not our calculated expected length, if returndatacopy is supported.
+ // This ensures it can catch badly formatted input from external calls.
+ m_context << (haveReturndatacopy ? eth::AssemblyItem(Instruction::RETURNDATASIZE) : u256(retSize));
+ // Stack: return_data_start return_data_size
+ if (needToUpdateFreeMemoryPtr)
+ m_context.appendInlineAssembly(R"({
+ // round size to the next multiple of 32
+ let newMem := add(start, and(add(size, 0x1f), not(0x1f)))
+ mstore(0x40, newMem)
+ })", {"start", "size"});
+
+ utils().abiDecode(returnTypes, true, true);
}
}
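
Note (not part of the commit): together with abiDecode, this change lets call sites receive dynamically-sized return values when the targeted EVM supports RETURNDATACOPY (Byzantium and later); the actual return size is copied to memory and decoded with bounds checks. A hedged Solidity sketch assuming such a target; contract names are made up:

    pragma solidity ^0.4.22;

    contract Oracle {
        function describe() public pure returns (string) {
            return "dynamic data";
        }
    }

    contract Consumer {
        function ask(Oracle o) public view returns (string) {
            // The generated call copies returndatasize() bytes to memory and
            // runs them through abiDecode(..., _revertOnOutOfBounds = true).
            return o.describe();
        }
    }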