-rw-r--r--   ArrayUtils.cpp          | 177
-rw-r--r--   ArrayUtils.h            |   4
-rw-r--r--   Compiler.cpp            |  13
-rw-r--r--   Compiler.h              |   8
-rw-r--r--   CompilerContext.cpp     |   9
-rw-r--r--   CompilerContext.h       |   1
-rw-r--r--   CompilerUtils.cpp       | 153
-rw-r--r--   CompilerUtils.h         |   7
-rw-r--r--   ExpressionCompiler.cpp  |  56
-rw-r--r--   ExpressionCompiler.h    |   7
-rw-r--r--   Types.cpp               |  32
-rw-r--r--   Types.h                 |   1
12 files changed, 324 insertions(+), 144 deletions(-)
diff --git a/ArrayUtils.cpp b/ArrayUtils.cpp
index e138e951..3be12af7 100644
--- a/ArrayUtils.cpp
+++ b/ArrayUtils.cpp
@@ -231,6 +231,181 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
     m_context << u256(0);
 }
 
+void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWordBoundaries) const
+{
+    solAssert(
+        _sourceType.getBaseType()->getCalldataEncodedSize() > 0,
+        "Nested arrays not yet implemented here."
+    );
+    unsigned baseSize = 1;
+    if (!_sourceType.isByteArray())
+        // We always pad the elements, regardless of _padToWordBoundaries.
+        baseSize = _sourceType.getBaseType()->getCalldataEncodedSize();
+
+    if (_sourceType.location() == DataLocation::CallData)
+    {
+        if (!_sourceType.isDynamicallySized())
+            m_context << _sourceType.getLength();
+        if (_sourceType.getBaseType()->getCalldataEncodedSize() > 1)
+            m_context << u256(baseSize) << eth::Instruction::MUL;
+        // stack: target source_offset source_len
+        m_context << eth::Instruction::DUP1 << eth::Instruction::DUP3 << eth::Instruction::DUP5;
+        // stack: target source_offset source_len source_len source_offset target
+        m_context << eth::Instruction::CALLDATACOPY;
+        m_context << eth::Instruction::DUP3 << eth::Instruction::ADD;
+        m_context << eth::Instruction::SWAP2 << eth::Instruction::POP << eth::Instruction::POP;
+    }
+    else if (_sourceType.location() == DataLocation::Memory)
+    {
+        // memcpy using the built-in contract
+        retrieveLength(_sourceType);
+        if (_sourceType.isDynamicallySized())
+        {
+            // change pointer to data part
+            m_context << eth::Instruction::SWAP1 << u256(32) << eth::Instruction::ADD;
+            m_context << eth::Instruction::SWAP1;
+        }
+        // convert length to size
+        if (baseSize > 1)
+            m_context << u256(baseSize) << eth::Instruction::MUL;
+        // stack: <target> <source> <size>
+        //@TODO do not use ::CALL if less than 32 bytes?
+        m_context << eth::Instruction::DUP1 << eth::Instruction::DUP4 << eth::Instruction::DUP4;
+        CompilerUtils(m_context).memoryCopy();
+
+        m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;
+        // stack: <target> <size>
+
+        bool paddingNeeded = false;
+        if (_sourceType.isDynamicallySized())
+            paddingNeeded = _padToWordBoundaries && ((baseSize % 32) != 0);
+        else
+            paddingNeeded = _padToWordBoundaries && (((_sourceType.getLength() * baseSize) % 32) != 0);
+        if (paddingNeeded)
+        {
+            // stack: <target> <size>
+            m_context << eth::Instruction::SWAP1 << eth::Instruction::DUP2 << eth::Instruction::ADD;
+            // stack: <size> <target + size>
+            m_context << eth::Instruction::SWAP1 << u256(31) << eth::Instruction::AND;
+            // stack: <target + size> <remainder = size % 32>
+            eth::AssemblyItem skip = m_context.newTag();
+            if (_sourceType.isDynamicallySized())
+            {
+                m_context << eth::Instruction::DUP1 << eth::Instruction::ISZERO;
+                m_context.appendConditionalJumpTo(skip);
+            }
+            // round off, load from there.
+            // stack <target + size> <remainder = size % 32>
+            m_context << eth::Instruction::DUP1 << eth::Instruction::DUP3;
+            m_context << eth::Instruction::SUB;
+            // stack: target+size remainder <target + size - remainder>
+            m_context << eth::Instruction::DUP1 << eth::Instruction::MLOAD;
+            // Now we AND it with ~(2**(8 * (32 - remainder)) - 1)
+            m_context << u256(1);
+            m_context << eth::Instruction::DUP4 << u256(32) << eth::Instruction::SUB;
+            // stack: ...<v> 1 <32 - remainder>
+            m_context << u256(0x100) << eth::Instruction::EXP << eth::Instruction::SUB;
+            m_context << eth::Instruction::NOT << eth::Instruction::AND;
+            // stack: target+size remainder target+size-remainder <v & ...>
+            m_context << eth::Instruction::DUP2 << eth::Instruction::MSTORE;
+            // stack: target+size remainder target+size-remainder
+            m_context << u256(32) << eth::Instruction::ADD;
+            // stack: target+size remainder <new_padded_end>
+            m_context << eth::Instruction::SWAP2 << eth::Instruction::POP;
+
+            if (_sourceType.isDynamicallySized())
+                m_context << skip.tag();
+            // stack <target + "size"> <remainder = size % 32>
+            m_context << eth::Instruction::POP;
+        }
+        else
+            // stack: <target> <size>
+            m_context << eth::Instruction::ADD;
+    }
+    else
+    {
+        solAssert(_sourceType.location() == DataLocation::Storage, "");
+        unsigned storageBytes = _sourceType.getBaseType()->getStorageBytes();
+        u256 storageSize = _sourceType.getBaseType()->getStorageSize();
+        solAssert(storageSize > 1 || (storageSize == 1 && storageBytes > 0), "");
+
+        m_context << eth::Instruction::POP; // remove offset, arrays always start new slot
+        retrieveLength(_sourceType);
+        // stack here: memory_offset storage_offset length
+        // jump to end if length is zero
+        m_context << eth::Instruction::DUP1 << eth::Instruction::ISZERO;
+        eth::AssemblyItem loopEnd = m_context.newTag();
+        m_context.appendConditionalJumpTo(loopEnd);
+        // compute memory end offset
+        if (baseSize > 1)
+            // convert length to memory size
+            m_context << u256(baseSize) << eth::Instruction::MUL;
+        m_context << eth::Instruction::DUP3 << eth::Instruction::ADD << eth::Instruction::SWAP2;
+        if (_sourceType.isDynamicallySized())
+        {
+            // actual array data is stored at SHA3(storage_offset)
+            m_context << eth::Instruction::SWAP1;
+            CompilerUtils(m_context).computeHashStatic();
+            m_context << eth::Instruction::SWAP1;
+        }
+
+        // stack here: memory_end_offset storage_data_offset memory_offset
+        bool haveByteOffset = !_sourceType.isByteArray() && storageBytes <= 16;
+        if (haveByteOffset)
+            m_context << u256(0) << eth::Instruction::SWAP1;
+        // stack here: memory_end_offset storage_data_offset [storage_byte_offset] memory_offset
+        eth::AssemblyItem loopStart = m_context.newTag();
+        m_context << loopStart;
+        // load and store
+        if (_sourceType.isByteArray())
+        {
+            // Packed both in storage and memory.
+            m_context << eth::Instruction::DUP2 << eth::Instruction::SLOAD;
+            m_context << eth::Instruction::DUP2 << eth::Instruction::MSTORE;
+            // increment storage_data_offset by 1
+            m_context << eth::Instruction::SWAP1 << u256(1) << eth::Instruction::ADD;
+            // increment memory offset by 32
+            m_context << eth::Instruction::SWAP1 << u256(32) << eth::Instruction::ADD;
+        }
+        else
+        {
+            // stack here: memory_end_offset storage_data_offset [storage_byte_offset] memory_offset
+            if (haveByteOffset)
+                m_context << eth::Instruction::DUP3 << eth::Instruction::DUP3;
+            else
+                m_context << eth::Instruction::DUP2 << u256(0);
+            StorageItem(m_context, *_sourceType.getBaseType()).retrieveValue(SourceLocation(), true);
+            CompilerUtils(m_context).storeInMemoryDynamic(*_sourceType.getBaseType());
+            // increment storage_data_offset and byte offset
+            if (haveByteOffset)
+                incrementByteOffset(storageBytes, 2, 3);
+            else
+            {
+                m_context << eth::Instruction::SWAP1;
+                m_context << storageSize << eth::Instruction::ADD;
+                m_context << eth::Instruction::SWAP1;
+            }
+        }
+        // check for loop condition
+        m_context << eth::Instruction::DUP1 << eth::dupInstruction(haveByteOffset ? 5 : 4) << eth::Instruction::GT;
+        m_context.appendConditionalJumpTo(loopStart);
+        // stack here: memory_end_offset storage_data_offset [storage_byte_offset] memory_offset
+        if (haveByteOffset)
+            m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;
+        if (_padToWordBoundaries && baseSize % 32 != 0)
+        {
+            // memory_end_offset - start is the actual length (we want to compute the ceil of).
+            // memory_offset - start is its next multiple of 32, but it might be off by 32.
+            // so we compute: memory_end_offset += (memory_offset - memory_end_offset) & 31
+            m_context << eth::Instruction::DUP3 << eth::Instruction::SWAP1 << eth::Instruction::SUB;
+            m_context << u256(31) << eth::Instruction::AND;
+            m_context << eth::Instruction::DUP3 << eth::Instruction::ADD;
+            m_context << eth::Instruction::SWAP2;
+        }
+        m_context << loopEnd << eth::Instruction::POP << eth::Instruction::POP;
+    }
+}
+
 void ArrayUtils::clearArray(ArrayType const& _type) const
 {
     unsigned stackHeightStart = m_context.getStackHeight();
@@ -499,6 +674,8 @@ void ArrayUtils::accessIndex(ArrayType const& _arrayType) const
             m_context << _arrayType.getBaseType()->getCalldataEncodedSize() << eth::Instruction::MUL;
     }
     m_context << eth::Instruction::ADD;
+    //@todo we should also load if it is a reference type of dynamic length
+    // but we should apply special logic if we load from calldata.
     if (_arrayType.getBaseType()->isValueType())
         CompilerUtils(m_context).loadFromMemoryDynamic(
             *_arrayType.getBaseType(),
diff --git a/ArrayUtils.h b/ArrayUtils.h
index dab40e2d..8d56f3c8 100644
--- a/ArrayUtils.h
+++ b/ArrayUtils.h
@@ -44,6 +44,10 @@ public:
     /// Stack pre: source_reference [source_byte_offset/source_length] target_reference target_byte_offset
     /// Stack post: target_reference target_byte_offset
     void copyArrayToStorage(ArrayType const& _targetType, ArrayType const& _sourceType) const;
+    /// Copies an array (which cannot be dynamically nested) from anywhere to memory.
+    /// Stack pre: memory_offset source_item
+    /// Stack post: memory_offset + length(padded)
+    void copyArrayToMemory(ArrayType const& _sourceType, bool _padToWordBoundaries = true) const;
     /// Clears the given dynamic or static array.
     /// Stack pre: storage_ref storage_byte_offset
     /// Stack post:
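Note on the padding step in copyArrayToMemory above: after the copy, the last partially written word is reloaded, masked so that only the `remainder` leading bytes survive, and stored back, which zero-pads the data to a 32-byte boundary. A minimal standalone sketch of the mask that the NOT(0x100**(32 - remainder) - 1) sequence computes (using boost::multiprecision, which dev::u256 wraps; remainder is assumed to lie in 1..31):

    #include <boost/multiprecision/cpp_int.hpp>
    using u256 = boost::multiprecision::uint256_t;

    // Mask that keeps the top `remainder` bytes of a 32-byte word and zeroes the rest,
    // i.e. the value the generated EVM code ANDs the reloaded word with.
    u256 paddingMask(unsigned remainder) // remainder in [1, 31]
    {
        u256 topBytes = (u256(1) << (8 * remainder)) - 1;
        return topBytes << (8 * (32 - remainder));
    }
    // paddingMask(1)  keeps only the most significant byte (31 zero bytes follow)
    // paddingMask(31) keeps everything except the least significant byte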
diff --git a/Compiler.cpp b/Compiler.cpp
index f5570b98..b05a7a9b 100644
--- a/Compiler.cpp
+++ b/Compiler.cpp
@@ -392,9 +392,9 @@ bool Compiler::visit(FunctionDefinition const& _function)
     }
 
     for (ASTPointer<VariableDeclaration const> const& variable: _function.getReturnParameters())
-        m_context.addAndInitializeVariable(*variable);
+        appendStackVariableInitialisation(*variable);
     for (VariableDeclaration const* localVariable: _function.getLocalVariables())
-        m_context.addAndInitializeVariable(*localVariable);
+        appendStackVariableInitialisation(*localVariable);
 
     if (_function.isConstructor())
         if (auto c = m_context.getNextConstructor(dynamic_cast<ContractDefinition const&>(*_function.getScope())))
@@ -639,7 +639,7 @@ void Compiler::appendModifierOrFunctionCode()
                 modifier.getParameters()[i]->getType());
         }
         for (VariableDeclaration const* localVariable: modifier.getLocalVariables())
-            m_context.addAndInitializeVariable(*localVariable);
+            appendStackVariableInitialisation(*localVariable);
 
         unsigned const c_stackSurplus = CompilerUtils::getSizeOnStack(modifier.getParameters()) +
             CompilerUtils::getSizeOnStack(modifier.getLocalVariables());
@@ -653,6 +653,13 @@ void Compiler::appendModifierOrFunctionCode()
     }
 }
 
+void Compiler::appendStackVariableInitialisation(VariableDeclaration const& _variable)
+{
+    CompilerContext::LocationSetter location(m_context, _variable);
+    m_context.addVariable(_variable);
+    ExpressionCompiler(m_context).appendStackVariableInitialisation(*_variable.getType());
+}
+
 void Compiler::compileExpression(Expression const& _expression, TypePointer const& _targetType)
 {
     ExpressionCompiler expressionCompiler(m_context, m_optimize);
diff --git a/Compiler.h b/Compiler.h
--- a/Compiler.h
+++ b/Compiler.h
@@ -84,6 +84,13 @@ private:
     void registerStateVariables(ContractDefinition const& _contract);
     void initializeStateVariables(ContractDefinition const& _contract);
 
+    /// Initialises all memory arrays in the local variables to point to an empty location.
+    void initialiseMemoryArrays(std::vector<VariableDeclaration const*> _variables);
+    /// Pushes the initialised value of the given type to the stack. If the type is a memory
+    /// reference type, allocates memory and pushes the memory pointer.
+    /// Not to be used for storage references.
+    void initialiseInMemory(Type const& _type);
+
     virtual bool visit(VariableDeclaration const& _variableDeclaration) override;
     virtual bool visit(FunctionDefinition const& _function) override;
     virtual bool visit(IfStatement const& _ifStatement) override;
@@ -100,6 +107,7 @@ private:
     /// body itself if the last modifier was reached.
     void appendModifierOrFunctionCode();
 
+    void appendStackVariableInitialisation(VariableDeclaration const& _variable);
     void compileExpression(Expression const& _expression, TypePointer const& _targetType = TypePointer());
 
     bool const m_optimize;
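For context on the refactor above: the removed CompilerContext::addAndInitializeVariable (next hunk) simply pushed one zero word per stack slot of the variable, while the new Compiler::appendStackVariableInitialisation delegates to ExpressionCompiler so that memory reference types instead receive a pointer to freshly allocated, zero-initialised memory. An illustrative stand-alone model of that difference, not compiler code; the boolean parameter is a hypothetical stand-in for the ReferenceType/DataLocation::Memory check in the diff:

    #include <cstdint>
    #include <vector>

    // Initial stack contents of a local variable after this change: value types and
    // storage references still start as zero words, memory references start as a
    // single pointer to a zeroed allocation.
    std::vector<uint64_t> initialStackSlots(bool isMemoryReference, unsigned sizeOnStack, uint64_t freshMemoryPtr)
    {
        if (isMemoryReference)
            return {freshMemoryPtr};                  // pointer pushed by the new code path
        return std::vector<uint64_t>(sizeOnStack, 0); // zero words, as before
    }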
diff --git a/CompilerContext.cpp b/CompilerContext.cpp
index fde6adac..0f6f5fe7 100644
--- a/CompilerContext.cpp
+++ b/CompilerContext.cpp
@@ -65,15 +65,6 @@ void CompilerContext::removeVariable(VariableDeclaration const& _declaration)
     m_localVariables.erase(&_declaration);
 }
 
-void CompilerContext::addAndInitializeVariable(VariableDeclaration const& _declaration)
-{
-    LocationSetter locationSetter(*this, _declaration);
-    addVariable(_declaration);
-    int const size = _declaration.getType()->getSizeOnStack();
-    for (int i = 0; i < size; ++i)
-        *this << u256(0);
-}
-
 bytes const& CompilerContext::getCompiledContract(const ContractDefinition& _contract) const
 {
     auto ret = m_compiledContracts.find(&_contract);
diff --git a/CompilerContext.h b/CompilerContext.h
index 998b0a2f..3f97d900 100644
--- a/CompilerContext.h
+++ b/CompilerContext.h
@@ -46,7 +46,6 @@ public:
     void addStateVariable(VariableDeclaration const& _declaration, u256 const& _storageOffset, unsigned _byteOffset);
     void addVariable(VariableDeclaration const& _declaration, unsigned _offsetToCurrent = 0);
     void removeVariable(VariableDeclaration const& _declaration);
-    void addAndInitializeVariable(VariableDeclaration const& _declaration);
 
     void setCompiledContracts(std::map<ContractDefinition const*, bytes const*> const& _contracts) { m_compiledContracts = _contracts; }
     bytes const& getCompiledContract(ContractDefinition const& _contract) const;
diff --git a/CompilerUtils.cpp b/CompilerUtils.cpp
index e763a394..47a9a354 100644
--- a/CompilerUtils.cpp
+++ b/CompilerUtils.cpp
@@ -25,6 +25,7 @@
 #include <libevmcore/Instruction.h>
 #include <libevmcore/Params.h>
 #include <libsolidity/ArrayUtils.h>
+#include <libsolidity/LValue.h>
 
 using namespace std;
 
@@ -101,130 +102,10 @@ void CompilerUtils::storeInMemory(unsigned _offset)
 void CompilerUtils::storeInMemoryDynamic(Type const& _type, bool _padToWordBoundaries)
 {
     if (_type.getCategory() == Type::Category::Array)
-    {
-        auto const& type = dynamic_cast<ArrayType const&>(_type);
-        solAssert(type.isByteArray(), "Non byte arrays not yet implemented here.");
-
-        if (type.location() == DataLocation::CallData)
-        {
-            if (!type.isDynamicallySized())
-                m_context << type.getLength();
-            // stack: target source_offset source_len
-            m_context << eth::Instruction::DUP1 << eth::Instruction::DUP3 << eth::Instruction::DUP5;
-            // stack: target source_offset source_len source_len source_offset target
-            m_context << eth::Instruction::CALLDATACOPY;
-            m_context << eth::Instruction::DUP3 << eth::Instruction::ADD;
-            m_context << eth::Instruction::SWAP2 << eth::Instruction::POP << eth::Instruction::POP;
-        }
-        else if (type.location() == DataLocation::Memory)
-        {
-            // memcpy using the built-in contract
-            ArrayUtils(m_context).retrieveLength(type);
-            if (type.isDynamicallySized())
-            {
-                // change pointer to data part
-                m_context << eth::Instruction::SWAP1 << u256(32) << eth::Instruction::ADD;
-                m_context << eth::Instruction::SWAP1;
-            }
-            // stack: <target> <source> <length>
-            // stack for call: outsize target size source value contract gas
-            m_context << eth::Instruction::DUP1 << eth::Instruction::DUP4;
-            m_context << eth::Instruction::DUP2 << eth::Instruction::DUP5;
-            m_context << u256(0) << u256(identityContractAddress);
-            //@TODO do not use ::CALL if less than 32 bytes?
-            //@todo in production, we should not have to pair c_callNewAccountGas.
-            m_context << u256(eth::c_callGas + 15 + eth::c_callNewAccountGas) << eth::Instruction::GAS;
-            m_context << eth::Instruction::SUB << eth::Instruction::CALL;
-            m_context << eth::Instruction::POP; // ignore return value
-
-            m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;
-            // stack: <target> <length>
-
-            if (_padToWordBoundaries && (type.isDynamicallySized() || (type.getLength()) % 32 != 0))
-            {
-                // stack: <target> <length>
-                m_context << eth::Instruction::SWAP1 << eth::Instruction::DUP2 << eth::Instruction::ADD;
-                // stack: <length> <target + length>
-                m_context << eth::Instruction::SWAP1 << u256(31) << eth::Instruction::AND;
-                // stack: <target + length> <remainder = length % 32>
-                eth::AssemblyItem skip = m_context.newTag();
-                if (type.isDynamicallySized())
-                {
-                    m_context << eth::Instruction::DUP1 << eth::Instruction::ISZERO;
-                    m_context.appendConditionalJumpTo(skip);
-                }
-                // round off, load from there.
-                // stack <target + length> <remainder = length % 32>
-                m_context << eth::Instruction::DUP1 << eth::Instruction::DUP3;
-                m_context << eth::Instruction::SUB;
-                // stack: target+length remainder <target + length - remainder>
-                m_context << eth::Instruction::DUP1 << eth::Instruction::MLOAD;
-                // Now we AND it with ~(2**(8 * (32 - remainder)) - 1)
-                m_context << u256(1);
-                m_context << eth::Instruction::DUP4 << u256(32) << eth::Instruction::SUB;
-                // stack: ...<v> 1 <32 - remainder>
-                m_context << u256(0x100) << eth::Instruction::EXP << eth::Instruction::SUB;
-                m_context << eth::Instruction::NOT << eth::Instruction::AND;
-                // stack: target+length remainder target+length-remainder <v & ...>
-                m_context << eth::Instruction::DUP2 << eth::Instruction::MSTORE;
-                // stack: target+length remainder target+length-remainder
-                m_context << u256(32) << eth::Instruction::ADD;
-                // stack: target+length remainder <new_padded_end>
-                m_context << eth::Instruction::SWAP2 << eth::Instruction::POP;
-
-                if (type.isDynamicallySized())
-                    m_context << skip.tag();
-                // stack <target + "length"> <remainder = length % 32>
-                m_context << eth::Instruction::POP;
-            }
-            else
-                // stack: <target> <length>
-                m_context << eth::Instruction::ADD;
-        }
-        else
-        {
-            solAssert(type.location() == DataLocation::Storage, "");
-            m_context << eth::Instruction::POP; // remove offset, arrays always start new slot
-            m_context << eth::Instruction::DUP1 << eth::Instruction::SLOAD;
-            // stack here: memory_offset storage_offset length_bytes
-            // jump to end if length is zero
-            m_context << eth::Instruction::DUP1 << eth::Instruction::ISZERO;
-            eth::AssemblyItem loopEnd = m_context.newTag();
-            m_context.appendConditionalJumpTo(loopEnd);
-            // compute memory end offset
-            m_context << eth::Instruction::DUP3 << eth::Instruction::ADD << eth::Instruction::SWAP2;
-            // actual array data is stored at SHA3(storage_offset)
-            m_context << eth::Instruction::SWAP1;
-            CompilerUtils(m_context).computeHashStatic();
-            m_context << eth::Instruction::SWAP1;
-
-            // stack here: memory_end_offset storage_data_offset memory_offset
-            eth::AssemblyItem loopStart = m_context.newTag();
-            m_context << loopStart;
-            // load and store
-            m_context << eth::Instruction::DUP2 << eth::Instruction::SLOAD;
-            m_context << eth::Instruction::DUP2 << eth::Instruction::MSTORE;
-            // increment storage_data_offset by 1
-            m_context << eth::Instruction::SWAP1 << u256(1) << eth::Instruction::ADD;
-            // increment memory offset by 32
-            m_context << eth::Instruction::SWAP1 << u256(32) << eth::Instruction::ADD;
-            // check for loop condition
-            m_context << eth::Instruction::DUP1 << eth::Instruction::DUP4 << eth::Instruction::GT;
-            m_context.appendConditionalJumpTo(loopStart);
-            // stack here: memory_end_offset storage_data_offset memory_offset
-            if (_padToWordBoundaries)
-            {
-                // memory_end_offset - start is the actual length (we want to compute the ceil of).
-                // memory_offset - start is its next multiple of 32, but it might be off by 32.
-                // so we compute: memory_end_offset += (memory_offset - memory_end_offest) & 31
-                m_context << eth::Instruction::DUP3 << eth::Instruction::SWAP1 << eth::Instruction::SUB;
-                m_context << u256(31) << eth::Instruction::AND;
-                m_context << eth::Instruction::DUP3 << eth::Instruction::ADD;
-                m_context << eth::Instruction::SWAP2;
-            }
-            m_context << loopEnd << eth::Instruction::POP << eth::Instruction::POP;
-        }
-    }
+        ArrayUtils(m_context).copyArrayToMemory(
+            dynamic_cast<ArrayType const&>(_type),
+            _padToWordBoundaries
+        );
     else
     {
         unsigned numBytes = prepareMemoryStore(_type, _padToWordBoundaries);
@@ -339,6 +220,21 @@ void CompilerUtils::encodeToMemory(
     popStackSlots(argSize + dynPointers + 1);
 }
 
+void CompilerUtils::memoryCopy()
+{
+    // Stack here: size target source
+    // stack for call: outsize target size source value contract gas
+    //@TODO do not use ::CALL if less than 32 bytes?
+    m_context << eth::Instruction::DUP3 << eth::Instruction::SWAP1;
+    m_context << u256(0) << u256(identityContractAddress);
+    // compute gas costs
+    m_context << u256(32) << eth::Instruction::DUP5 << u256(31) << eth::Instruction::ADD;
+    m_context << eth::Instruction::DIV << u256(eth::c_identityWordGas) << eth::Instruction::MUL;
+    m_context << u256(eth::c_identityGas) << eth::Instruction::ADD;
+    m_context << eth::Instruction::CALL;
+    m_context << eth::Instruction::POP; // ignore return value
+}
+
 void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetType, bool _cleanupNeeded)
 {
     // For a type extension, we need to remove all higher-order bits that we might have ignored in
@@ -513,7 +409,14 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
             break;
         }
         default:
-            solAssert(false, "Invalid type conversion requested.");
+            solAssert(
+                false,
+                "Invalid type conversion " +
+                _typeOnStack.toString(false) +
+                " to " +
+                _targetType.toString(false) +
+                " requested."
+            );
         }
         break;
     }
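The gas computation in the new memoryCopy() pays the identity precompile's price up front: a base fee plus a per-word fee for ceil(size / 32) words. A rough host-side model of the amount forwarded to the CALL, assuming the Frontier-era parameters c_identityGas = 15 and c_identityWordGas = 3 from libevmcore/Params.h (the CALL itself additionally costs the usual call gas):

    #include <cstdint>

    // Gas supplied for copying `size` bytes via the identity contract, mirroring the
    // u256(32) / DUP5 / u256(31) / ADD / DIV / MUL / ADD sequence in memoryCopy() above.
    uint64_t identityCopyGas(uint64_t size)
    {
        uint64_t words = (size + 31) / 32;  // round the byte count up to whole 32-byte words
        return 15 + 3 * words;              // c_identityGas + words * c_identityWordGas (assumed values)
    }
    // identityCopyGas(0)  == 15
    // identityCopyGas(33) == 21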
diff --git a/CompilerUtils.h b/CompilerUtils.h
index 9a599f7f..a9e07f74 100644
--- a/CompilerUtils.h
+++ b/CompilerUtils.h
@@ -71,6 +71,8 @@ public:
     void storeInMemory(unsigned _offset);
     /// Dynamic version of @see storeInMemory, expects the memory offset below the value on the stack
     /// and also updates that. For arrays, only copies the data part.
+    /// @param _padToWordBoundaries if true, adds zeros to pad to multiple of 32 bytes. Array elements
+    /// are always padded (except for byte arrays), regardless of this parameter.
     /// Stack pre: memory_offset value...
     /// Stack post: (memory_offset+length)
     void storeInMemoryDynamic(Type const& _type, bool _padToWordBoundaries = true);
@@ -93,6 +95,11 @@ public:
         bool _copyDynamicDataInPlace = false
     );
 
+    /// Uses a CALL to the identity contract to perform a memory-to-memory copy.
+    /// Stack pre: <size> <target> <source>
+    /// Stack post:
+    void memoryCopy();
+
     /// Appends code for an implicit or explicit type conversion. This includes erasing higher
     /// order bits (@see appendHighBitCleanup) when widening integer but also copy to memory
     /// if a reference type is converted from calldata or storage to memory.
diff --git a/ExpressionCompiler.cpp b/ExpressionCompiler.cpp
index 7d6ed346..fb10eb83 100644
--- a/ExpressionCompiler.cpp
+++ b/ExpressionCompiler.cpp
@@ -56,6 +56,62 @@ void ExpressionCompiler::appendStateVariableInitialization(VariableDeclaration c
     StorageItem(m_context, _varDecl).storeValue(*_varDecl.getType(), _varDecl.getLocation(), true);
 }
 
+void ExpressionCompiler::appendStackVariableInitialisation(Type const& _type, bool _toMemory)
+{
+    CompilerUtils utils(m_context);
+    auto const* referenceType = dynamic_cast<ReferenceType const*>(&_type);
+    if (!referenceType || referenceType->location() == DataLocation::Storage)
+    {
+        for (size_t i = 0; i < _type.getSizeOnStack(); ++i)
+            m_context << u256(0);
+        if (_toMemory)
+            utils.storeInMemoryDynamic(_type);
+        return;
+    }
+    solAssert(referenceType->location() == DataLocation::Memory, "");
+    if (!_toMemory)
+    {
+        // allocate memory
+        utils.fetchFreeMemoryPointer();
+        m_context << eth::Instruction::DUP1 << u256(max(32u, _type.getCalldataEncodedSize()));
+        m_context << eth::Instruction::ADD;
+        utils.storeFreeMemoryPointer();
+        m_context << eth::Instruction::DUP1;
+    }
+
+    if (auto structType = dynamic_cast<StructType const*>(&_type))
+        for (auto const& member: structType->getMembers())
+            appendStackVariableInitialisation(*member.type, true);
+    else if (auto arrayType = dynamic_cast<ArrayType const*>(&_type))
+    {
+        if (arrayType->isDynamicallySized())
+        {
+            // zero length
+            m_context << u256(0);
+            CompilerUtils(m_context).storeInMemoryDynamic(IntegerType(256));
+        }
+        else if (arrayType->getLength() > 0)
+        {
+            m_context << arrayType->getLength() << eth::Instruction::SWAP1;
+            // stack: items_to_do memory_pos
+            auto repeat = m_context.newTag();
+            m_context << repeat;
+            appendStackVariableInitialisation(*arrayType->getBaseType(), true);
+            m_context << eth::Instruction::SWAP1 << u256(1) << eth::Instruction::SWAP1;
+            m_context << eth::Instruction::SUB << eth::Instruction::SWAP1;
+            m_context << eth::Instruction::DUP2;
+            m_context.appendConditionalJumpTo(repeat);
+            m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;
+        }
+    }
+    else
+        solAssert(false, "Requested initialisation for unknown type: " + _type.toString());
+
+    if (!_toMemory)
+        // remove the updated memory pointer
+        m_context << eth::Instruction::POP;
+}
+
 void ExpressionCompiler::appendStateVariableAccessor(VariableDeclaration const& _varDecl)
 {
     CompilerContext::LocationSetter locationSetter(m_context, _varDecl);
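A note on the allocation size used in appendStackVariableInitialisation above: for a memory reference type, the code bumps the free memory pointer by max(32, getCalldataEncodedSize()) and then zero-initialises the reserved area member by member (dynamic arrays only get a zero length word). A small model of the reserved size, assuming that getCalldataEncodedSize() returns 0 for types whose encoded size is not statically known, such as dynamically sized arrays:

    #include <algorithm>

    // Bytes reserved at the free memory pointer for one local variable of memory
    // reference type (the DUP1 / max(32, getCalldataEncodedSize()) / ADD sequence).
    unsigned reservedBytes(unsigned calldataEncodedSize)
    {
        // The 32-byte floor guarantees room for at least a length word.
        return std::max(32u, calldataEncodedSize);
    }
    // reservedBytes(0)  == 32   e.g. a dynamically sized array: just the zero length word
    // reservedBytes(96) == 96   e.g. a fixed-size array of three 32-byte elements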
diff --git a/ExpressionCompiler.h b/ExpressionCompiler.h
index 642560c6..747e241e 100644
--- a/ExpressionCompiler.h
+++ b/ExpressionCompiler.h
@@ -64,6 +64,13 @@ public:
     /// Appends code to set a state variable to its initial value/expression.
     void appendStateVariableInitialization(VariableDeclaration const& _varDecl);
 
+    /// Appends code to initialise a local variable.
+    /// If @a _toMemory is false, leaves the value on the stack. For memory references, this
+    /// allocates new memory.
+    /// If @a _toMemory is true, directly stores the data in the memory pos on the stack and
+    /// updates it.
+    void appendStackVariableInitialisation(Type const& _type, bool _toMemory = false);
+
     /// Appends code for a State Variable accessor function
     void appendStateVariableAccessor(VariableDeclaration const& _varDecl);
diff --git a/Types.cpp b/Types.cpp
--- a/Types.cpp
+++ b/Types.cpp
@@ -721,9 +721,13 @@ bool ArrayType::isImplicitlyConvertibleTo(const Type& _convertTo) const
     }
     else
     {
-        // Require that the base type is the same, not only convertible.
-        // This disallows assignment of nested arrays from storage to memory for now.
-        if (*getBaseType() != *convertTo.getBaseType())
+        // Conversion to storage pointer or to memory, we do not copy element-for-element here, so
+        // require that the base type is the same, not only convertible.
+        // This disallows assignment of nested dynamic arrays from storage to memory for now.
+        if (
+            *copyForLocationIfReference(location(), getBaseType()) !=
+            *copyForLocationIfReference(location(), convertTo.getBaseType())
+        )
             return false;
         if (isDynamicallySized() != convertTo.isDynamicallySized())
             return false;
@@ -822,16 +826,16 @@ string ArrayType::toString(bool _short) const
 TypePointer ArrayType::externalType() const
 {
     if (m_arrayKind != ArrayKind::Ordinary)
-        return this->copyForLocation(DataLocation::CallData, true);
+        return this->copyForLocation(DataLocation::Memory, true);
     if (!m_baseType->externalType())
         return TypePointer();
     if (m_baseType->getCategory() == Category::Array && m_baseType->isDynamicallySized())
         return TypePointer();
     if (isDynamicallySized())
-        return std::make_shared<ArrayType>(DataLocation::CallData, m_baseType->externalType());
+        return std::make_shared<ArrayType>(DataLocation::Memory, m_baseType->externalType());
     else
-        return std::make_shared<ArrayType>(DataLocation::CallData, m_baseType->externalType(), m_length);
+        return std::make_shared<ArrayType>(DataLocation::Memory, m_baseType->externalType(), m_length);
 }
 
@@ -970,6 +974,22 @@ bool StructType::operator==(Type const& _other) const
     return ReferenceType::operator==(other) && other.m_struct == m_struct;
 }
 
+unsigned StructType::getCalldataEncodedSize(bool _padded) const
+{
+    unsigned size = 0;
+    for (auto const& member: getMembers())
+        if (!member.type->canLiveOutsideStorage())
+            return 0;
+        else
+        {
+            unsigned memberSize = member.type->getCalldataEncodedSize(_padded);
+            if (memberSize == 0)
+                return 0;
+            size += memberSize;
+        }
+    return size;
+}
+
 u256 StructType::getStorageSize() const
 {
     return max<u256>(1, getMembers().getStorageSize());
diff --git a/Types.h b/Types.h
--- a/Types.h
+++ b/Types.h
@@ -542,6 +542,7 @@ public:
     virtual bool isImplicitlyConvertibleTo(const Type& _convertTo) const override;
     virtual TypePointer unaryOperatorResult(Token::Value _operator) const override;
     virtual bool operator==(Type const& _other) const override;
+    virtual unsigned getCalldataEncodedSize(bool _padded) const override;
     virtual u256 getStorageSize() const override;
    virtual bool canLiveOutsideStorage() const override;
    virtual unsigned getSizeOnStack() const override { return 2; }
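The new StructType::getCalldataEncodedSize (Types.cpp above) sums the members' encoded sizes and reports 0 as soon as any member cannot be encoded. A stand-alone model with the member sizes passed in directly, where 0 stands for "cannot live outside storage or size not statically known":

    #include <vector>

    // Mirrors the summation logic added to StructType::getCalldataEncodedSize():
    // any member of unknown/unencodable size makes the whole struct report 0.
    unsigned structCalldataEncodedSize(std::vector<unsigned> const& memberSizes)
    {
        unsigned size = 0;
        for (unsigned memberSize: memberSizes)
        {
            if (memberSize == 0)
                return 0;
            size += memberSize;
        }
        return size;
    }
    // structCalldataEncodedSize({32, 32}) == 64
    // structCalldataEncodedSize({32, 0})  == 0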