author    chriseth <c@ethdev.com>    2015-06-23 20:55:33 +0800
committer chriseth <c@ethdev.com>    2015-06-24 18:18:19 +0800
commit    1add48a652ea695032d8c664fad3ea84afbfb9ea
tree      1410f848aeaf32b61d7745999c85334275151338
parent    fd1a01bbce5b5b6491e05b87fb183a55e9804f4e
Copy routines for non-byte arrays.
-rw-r--r--  ArrayUtils.cpp     | 175
-rw-r--r--  ArrayUtils.h       |   4
-rw-r--r--  CompilerUtils.cpp  | 144
-rw-r--r--  CompilerUtils.h    |   7
-rw-r--r--  Types.cpp          |  10
5 files changed, 213 insertions, 127 deletions
diff --git a/ArrayUtils.cpp b/ArrayUtils.cpp
index e138e951..a7cf4792 100644
--- a/ArrayUtils.cpp
+++ b/ArrayUtils.cpp
@@ -231,6 +231,181 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
m_context << u256(0);
}
+void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWordBoundaries) const
+{
+ solAssert(
+ _sourceType.getBaseType()->getCalldataEncodedSize() > 0,
+ "Nested arrays not yet implemented here."
+ );
+ unsigned baseSize = 1;
+ if (!_sourceType.isByteArray())
+ // We always pad the elements, regardless of _padToWordBoundaries.
+ baseSize = _sourceType.getBaseType()->getCalldataEncodedSize();
+
+ if (_sourceType.location() == DataLocation::CallData)
+ {
+ if (!_sourceType.isDynamicallySized())
+ m_context << _sourceType.getLength();
+ if (_sourceType.getBaseType()->getCalldataEncodedSize() > 1)
+ m_context << u256(baseSize) << eth::Instruction::MUL;
+ // stack: target source_offset source_len
+ m_context << eth::Instruction::DUP1 << eth::Instruction::DUP3 << eth::Instruction::DUP5;
+ // stack: target source_offset source_len source_len source_offset target
+ m_context << eth::Instruction::CALLDATACOPY;
+ m_context << eth::Instruction::DUP3 << eth::Instruction::ADD;
+ m_context << eth::Instruction::SWAP2 << eth::Instruction::POP << eth::Instruction::POP;
+ }
+ else if (_sourceType.location() == DataLocation::Memory)
+ {
+ // memcpy using the built-in contract
+ retrieveLength(_sourceType);
+ if (_sourceType.isDynamicallySized())
+ {
+ // change pointer to data part
+ m_context << eth::Instruction::SWAP1 << u256(32) << eth::Instruction::ADD;
+ m_context << eth::Instruction::SWAP1;
+ }
+ // convert length to size
+ if (baseSize > 1)
+ m_context << u256(baseSize) << eth::Instruction::MUL;
+ // stack: <target> <source> <size>
+ //@TODO do not use ::CALL if less than 32 bytes?
+ m_context << eth::Instruction::DUP1 << eth::Instruction::DUP4 << eth::Instruction::DUP4;
+ CompilerUtils(m_context).memoryCopy();
+
+ m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;
+ // stack: <target> <size>
+
+ bool paddingNeeded = false;
+ if (_sourceType.isDynamicallySized())
+ paddingNeeded = _padToWordBoundaries && ((baseSize % 32) != 0);
+ else
+ paddingNeeded = _padToWordBoundaries && (((_sourceType.getLength() * baseSize) % 32) != 0);
+ if (paddingNeeded)
+ {
+ // stack: <target> <size>
+ m_context << eth::Instruction::SWAP1 << eth::Instruction::DUP2 << eth::Instruction::ADD;
+ // stack: <length> <target + size>
+ m_context << eth::Instruction::SWAP1 << u256(31) << eth::Instruction::AND;
+ // stack: <target + size> <remainder = size % 32>
+ eth::AssemblyItem skip = m_context.newTag();
+ if (_sourceType.isDynamicallySized())
+ {
+ m_context << eth::Instruction::DUP1 << eth::Instruction::ISZERO;
+ m_context.appendConditionalJumpTo(skip);
+ }
+ // round off, load from there.
+ // stack <target + size> <remainder = size % 32>
+ m_context << eth::Instruction::DUP1 << eth::Instruction::DUP3;
+ m_context << eth::Instruction::SUB;
+ // stack: target+size remainder <target + size - remainder>
+ m_context << eth::Instruction::DUP1 << eth::Instruction::MLOAD;
+ // Now we AND it with ~(2**(8 * (32 - remainder)) - 1)
+ m_context << u256(1);
+ m_context << eth::Instruction::DUP4 << u256(32) << eth::Instruction::SUB;
+ // stack: ...<v> 1 <32 - remainder>
+ m_context << u256(0x100) << eth::Instruction::EXP << eth::Instruction::SUB;
+ m_context << eth::Instruction::NOT << eth::Instruction::AND;
+ // stack: target+size remainder target+size-remainder <v & ...>
+ m_context << eth::Instruction::DUP2 << eth::Instruction::MSTORE;
+ // stack: target+size remainder target+size-remainder
+ m_context << u256(32) << eth::Instruction::ADD;
+ // stack: target+size remainder <new_padded_end>
+ m_context << eth::Instruction::SWAP2 << eth::Instruction::POP;
+
+ if (_sourceType.isDynamicallySized())
+ m_context << skip.tag();
+ // stack <target + "size"> <remainder = size % 32>
+ m_context << eth::Instruction::POP;
+ }
+ else
+ // stack: <target> <size>
+ m_context << eth::Instruction::ADD;
+ }
+ else
+ {
+ solAssert(_sourceType.location() == DataLocation::Storage, "");
+ unsigned storageBytes = _sourceType.getBaseType()->getStorageBytes();
+ u256 storageSize = _sourceType.getBaseType()->getStorageSize();
+ solAssert(storageSize > 1 || (storageSize == 1 && storageBytes > 0), "");
+
+ m_context << eth::Instruction::POP; // remove offset, arrays always start new slot
+ retrieveLength(_sourceType);
+ // stack here: memory_offset storage_offset length
+ // jump to end if length is zero
+ m_context << eth::Instruction::DUP1 << eth::Instruction::ISZERO;
+ eth::AssemblyItem loopEnd = m_context.newTag();
+ m_context.appendConditionalJumpTo(loopEnd);
+ // compute memory end offset
+ if (baseSize > 1)
+ // convert length to memory size
+ m_context << u256(baseSize) << eth::Instruction::MUL;
+ m_context << eth::Instruction::DUP3 << eth::Instruction::ADD << eth::Instruction::SWAP2;
+ if (_sourceType.isDynamicallySized())
+ {
+ // actual array data is stored at SHA3(storage_offset)
+ m_context << eth::Instruction::SWAP1;
+ CompilerUtils(m_context).computeHashStatic();
+ m_context << eth::Instruction::SWAP1;
+ }
+
+ // stack here: memory_end_offset storage_data_offset memory_offset
+ bool haveByteOffset = !_sourceType.isByteArray() && storageBytes <= 16;
+ if (haveByteOffset)
+ m_context << u256(0) << eth::Instruction::SWAP1;
+ // stack here: memory_end_offset storage_data_offset [storage_byte_offset] memory_offset
+ eth::AssemblyItem loopStart = m_context.newTag();
+ m_context << loopStart;
+ // load and store
+ if (_sourceType.isByteArray())
+ {
+ // Packed both in storage and memory.
+ m_context << eth::Instruction::DUP2 << eth::Instruction::SLOAD;
+ m_context << eth::Instruction::DUP2 << eth::Instruction::MSTORE;
+ // increment storage_data_offset by 1
+ m_context << eth::Instruction::SWAP1 << u256(1) << eth::Instruction::ADD;
+ // increment memory offset by 32
+ m_context << eth::Instruction::SWAP1 << u256(32) << eth::Instruction::ADD;
+ }
+ else
+ {
+ // stack here: memory_end_offset storage_data_offset [storage_byte_offset] memory_offset
+ if (haveByteOffset)
+ m_context << eth::Instruction::DUP3 << eth::Instruction::DUP3;
+ else
+ m_context << eth::Instruction::DUP2 << u256(0);
+ StorageItem(m_context, *_sourceType.getBaseType()).retrieveValue(SourceLocation(), true);
+ CompilerUtils(m_context).storeInMemoryDynamic(*_sourceType.getBaseType());
+ // increment storage_data_offset and byte offset
+ if (haveByteOffset)
+ incrementByteOffset(storageBytes, 2, 3);
+ else
+ {
+ m_context << eth::Instruction::SWAP1;
+ m_context << storageSize << eth::Instruction::ADD;
+ m_context << eth::Instruction::SWAP1;
+ }
+ }
+ // check for loop condition
+ m_context << eth::Instruction::DUP1 << eth::dupInstruction(haveByteOffset ? 5 : 4) << eth::Instruction::GT;
+ m_context.appendConditionalJumpTo(loopStart);
+ // stack here: memory_end_offset storage_data_offset [storage_byte_offset] memory_offset
+ if (haveByteOffset)
+ m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;
+ if (_padToWordBoundaries && baseSize % 32 != 0)
+ {
+ // memory_end_offset - start is the actual length (we want to compute the ceil of).
+ // memory_offset - start is its next multiple of 32, but it might be off by 32.
+ // so we compute: memory_end_offset += (memory_offset - memory_end_offset) & 31
+ m_context << eth::Instruction::DUP3 << eth::Instruction::SWAP1 << eth::Instruction::SUB;
+ m_context << u256(31) << eth::Instruction::AND;
+ m_context << eth::Instruction::DUP3 << eth::Instruction::ADD;
+ m_context << eth::Instruction::SWAP2;
+ }
+ m_context << loopEnd << eth::Instruction::POP << eth::Instruction::POP;
+ }
+}
+
void ArrayUtils::clearArray(ArrayType const& _type) const
{
unsigned stackHeightStart = m_context.getStackHeight();
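The word-boundary cleanup added above masks the last, partially filled memory word with ~(2**(8 * (32 - remainder)) - 1), zeroing every byte past the copied data. The following standalone C++ sketch is not part of the change and only illustrates that arithmetic, with Boost multiprecision standing in for the compiler's u256:

    #include <boost/multiprecision/cpp_int.hpp>
    #include <iostream>

    using boost::multiprecision::uint256_t;

    // Mask that keeps the first `remainder` bytes of a 32-byte word and zeroes the rest,
    // i.e. ~(2**(8 * (32 - remainder)) - 1), with remainder in [1, 31].
    uint256_t paddingMask(unsigned remainder)
    {
        uint256_t low = (uint256_t(1) << (8 * (32 - remainder))) - 1;
        return ~low;
    }

    int main()
    {
        // A word whose first 5 bytes are valid data and whose remaining 27 bytes are garbage.
        uint256_t valid("0x1122334455");
        uint256_t garbage = (uint256_t(1) << (8 * 27)) - 1;
        uint256_t word = (valid << (8 * 27)) | garbage;
        // The MLOAD / AND / MSTORE sequence in the generated code corresponds to this masking step.
        std::cout << std::hex << (word & paddingMask(5)) << std::endl;
        // prints 1122334455 followed by 54 zero digits
    }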
diff --git a/ArrayUtils.h b/ArrayUtils.h
index dab40e2d..8d56f3c8 100644
--- a/ArrayUtils.h
+++ b/ArrayUtils.h
@@ -44,6 +44,10 @@ public:
/// Stack pre: source_reference [source_byte_offset/source_length] target_reference target_byte_offset
/// Stack post: target_reference target_byte_offset
void copyArrayToStorage(ArrayType const& _targetType, ArrayType const& _sourceType) const;
+ /// Copies an array (which cannot be dynamically nested) from anywhere to memory.
+ /// Stack pre: memory_offset source_item
+ /// Stack post: memory_offset + length (padded)
+ void copyArrayToMemory(ArrayType const& _sourceType, bool _padToWordBoundaries = true) const;
/// Clears the given dynamic or static array.
/// Stack pre: storage_ref storage_byte_offset
/// Stack post:
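In the stack-post comment above, "length (padded)" is the source length rounded up to the next multiple of 32 bytes whenever _padToWordBoundaries is set. A minimal sketch of that rounding, for illustration only:

    #include <cstdint>

    // Rounds a byte length up to the next 32-byte word boundary.
    uint64_t paddedLength(uint64_t lengthInBytes)
    {
        return ((lengthInBytes + 31) / 32) * 32;
    }
    // e.g. paddedLength(33) == 64 and paddedLength(64) == 64, so the routine leaves
    // memory_offset + paddedLength(byte length) on the stack.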
diff --git a/CompilerUtils.cpp b/CompilerUtils.cpp
index 5bd6de13..b6d79733 100644
--- a/CompilerUtils.cpp
+++ b/CompilerUtils.cpp
@@ -25,6 +25,7 @@
#include <libevmcore/Instruction.h>
#include <libevmcore/Params.h>
#include <libsolidity/ArrayUtils.h>
+#include <libsolidity/LValue.h>
using namespace std;
@@ -103,130 +104,10 @@ unsigned CompilerUtils::storeInMemory(unsigned _offset, Type const& _type, bool
void CompilerUtils::storeInMemoryDynamic(Type const& _type, bool _padToWordBoundaries)
{
if (_type.getCategory() == Type::Category::Array)
- {
- auto const& type = dynamic_cast<ArrayType const&>(_type);
- solAssert(type.isByteArray(), "Non byte arrays not yet implemented here.");
-
- if (type.location() == DataLocation::CallData)
- {
- if (!type.isDynamicallySized())
- m_context << type.getLength();
- // stack: target source_offset source_len
- m_context << eth::Instruction::DUP1 << eth::Instruction::DUP3 << eth::Instruction::DUP5;
- // stack: target source_offset source_len source_len source_offset target
- m_context << eth::Instruction::CALLDATACOPY;
- m_context << eth::Instruction::DUP3 << eth::Instruction::ADD;
- m_context << eth::Instruction::SWAP2 << eth::Instruction::POP << eth::Instruction::POP;
- }
- else if (type.location() == DataLocation::Memory)
- {
- // memcpy using the built-in contract
- ArrayUtils(m_context).retrieveLength(type);
- if (type.isDynamicallySized())
- {
- // change pointer to data part
- m_context << eth::Instruction::SWAP1 << u256(32) << eth::Instruction::ADD;
- m_context << eth::Instruction::SWAP1;
- }
- // stack: <target> <source> <length>
- // stack for call: outsize target size source value contract gas
- m_context << eth::Instruction::DUP1 << eth::Instruction::DUP4;
- m_context << eth::Instruction::DUP2 << eth::Instruction::DUP5;
- m_context << u256(0) << u256(identityContractAddress);
- //@TODO do not use ::CALL if less than 32 bytes?
- //@todo in production, we should not have to pair c_callNewAccountGas.
- m_context << u256(eth::c_callGas + 15 + eth::c_callNewAccountGas) << eth::Instruction::GAS;
- m_context << eth::Instruction::SUB << eth::Instruction::CALL;
- m_context << eth::Instruction::POP; // ignore return value
-
- m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;
- // stack: <target> <length>
-
- if (_padToWordBoundaries && (type.isDynamicallySized() || (type.getLength()) % 32 != 0))
- {
- // stack: <target> <length>
- m_context << eth::Instruction::SWAP1 << eth::Instruction::DUP2 << eth::Instruction::ADD;
- // stack: <length> <target + length>
- m_context << eth::Instruction::SWAP1 << u256(31) << eth::Instruction::AND;
- // stack: <target + length> <remainder = length % 32>
- eth::AssemblyItem skip = m_context.newTag();
- if (type.isDynamicallySized())
- {
- m_context << eth::Instruction::DUP1 << eth::Instruction::ISZERO;
- m_context.appendConditionalJumpTo(skip);
- }
- // round off, load from there.
- // stack <target + length> <remainder = length % 32>
- m_context << eth::Instruction::DUP1 << eth::Instruction::DUP3;
- m_context << eth::Instruction::SUB;
- // stack: target+length remainder <target + length - remainder>
- m_context << eth::Instruction::DUP1 << eth::Instruction::MLOAD;
- // Now we AND it with ~(2**(8 * (32 - remainder)) - 1)
- m_context << u256(1);
- m_context << eth::Instruction::DUP4 << u256(32) << eth::Instruction::SUB;
- // stack: ...<v> 1 <32 - remainder>
- m_context << u256(0x100) << eth::Instruction::EXP << eth::Instruction::SUB;
- m_context << eth::Instruction::NOT << eth::Instruction::AND;
- // stack: target+length remainder target+length-remainder <v & ...>
- m_context << eth::Instruction::DUP2 << eth::Instruction::MSTORE;
- // stack: target+length remainder target+length-remainder
- m_context << u256(32) << eth::Instruction::ADD;
- // stack: target+length remainder <new_padded_end>
- m_context << eth::Instruction::SWAP2 << eth::Instruction::POP;
-
- if (type.isDynamicallySized())
- m_context << skip.tag();
- // stack <target + "length"> <remainder = length % 32>
- m_context << eth::Instruction::POP;
- }
- else
- // stack: <target> <length>
- m_context << eth::Instruction::ADD;
- }
- else
- {
- solAssert(type.location() == DataLocation::Storage, "");
- m_context << eth::Instruction::POP; // remove offset, arrays always start new slot
- m_context << eth::Instruction::DUP1 << eth::Instruction::SLOAD;
- // stack here: memory_offset storage_offset length_bytes
- // jump to end if length is zero
- m_context << eth::Instruction::DUP1 << eth::Instruction::ISZERO;
- eth::AssemblyItem loopEnd = m_context.newTag();
- m_context.appendConditionalJumpTo(loopEnd);
- // compute memory end offset
- m_context << eth::Instruction::DUP3 << eth::Instruction::ADD << eth::Instruction::SWAP2;
- // actual array data is stored at SHA3(storage_offset)
- m_context << eth::Instruction::SWAP1;
- CompilerUtils(m_context).computeHashStatic();
- m_context << eth::Instruction::SWAP1;
-
- // stack here: memory_end_offset storage_data_offset memory_offset
- eth::AssemblyItem loopStart = m_context.newTag();
- m_context << loopStart;
- // load and store
- m_context << eth::Instruction::DUP2 << eth::Instruction::SLOAD;
- m_context << eth::Instruction::DUP2 << eth::Instruction::MSTORE;
- // increment storage_data_offset by 1
- m_context << eth::Instruction::SWAP1 << u256(1) << eth::Instruction::ADD;
- // increment memory offset by 32
- m_context << eth::Instruction::SWAP1 << u256(32) << eth::Instruction::ADD;
- // check for loop condition
- m_context << eth::Instruction::DUP1 << eth::Instruction::DUP4 << eth::Instruction::GT;
- m_context.appendConditionalJumpTo(loopStart);
- // stack here: memory_end_offset storage_data_offset memory_offset
- if (_padToWordBoundaries)
- {
- // memory_end_offset - start is the actual length (we want to compute the ceil of).
- // memory_offset - start is its next multiple of 32, but it might be off by 32.
- // so we compute: memory_end_offset += (memory_offset - memory_end_offest) & 31
- m_context << eth::Instruction::DUP3 << eth::Instruction::SWAP1 << eth::Instruction::SUB;
- m_context << u256(31) << eth::Instruction::AND;
- m_context << eth::Instruction::DUP3 << eth::Instruction::ADD;
- m_context << eth::Instruction::SWAP2;
- }
- m_context << loopEnd << eth::Instruction::POP << eth::Instruction::POP;
- }
- }
+ ArrayUtils(m_context).copyArrayToMemory(
+ dynamic_cast<ArrayType const&>(_type),
+ _padToWordBoundaries
+ );
else
{
unsigned numBytes = prepareMemoryStore(_type, _padToWordBoundaries);
@@ -341,6 +222,21 @@ void CompilerUtils::encodeToMemory(
popStackSlots(argSize + dynPointers + 1);
}
+void CompilerUtils::memoryCopy()
+{
+ // Stack here: size target source
+ // stack for call: outsize target size source value contract gas
+ //@TODO do not use ::CALL if less than 32 bytes?
+ m_context << eth::Instruction::DUP3 << eth::Instruction::SWAP1;
+ m_context << u256(0) << u256(identityContractAddress);
+ // compute gas costs
+ m_context << u256(32) << eth::Instruction::DUP5 << u256(31) << eth::Instruction::ADD;
+ m_context << eth::Instruction::DIV << u256(eth::c_identityWordGas) << eth::Instruction::MUL;
+ m_context << u256(eth::c_identityGas) << eth::Instruction::ADD;
+ m_context << eth::Instruction::CALL;
+ m_context << eth::Instruction::POP; // ignore return value
+}
+
void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetType, bool _cleanupNeeded)
{
// For a type extension, we need to remove all higher-order bits that we might have ignored in
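The gas computation in memoryCopy (the 31 ADD, DIV by 32, MUL, ADD sequence) forwards only what the identity precompile charges: a base fee plus a per-word fee. A small sketch of the same formula, assuming the usual values c_identityGas = 15 and c_identityWordGas = 3:

    #include <cstdint>

    // Gas the generated code hands to the identity contract for a copy of `size` bytes:
    // c_identityGas + ceil(size / 32) * c_identityWordGas.
    uint64_t identityCopyGas(uint64_t size)
    {
        uint64_t words = (size + 31) / 32;   // number of 32-byte words, rounded up
        return 15 + 3 * words;               // assumed values of c_identityGas and c_identityWordGas
    }
    // e.g. a 100-byte copy forwards 15 + 3 * 4 = 27 gas to the precompile.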
diff --git a/CompilerUtils.h b/CompilerUtils.h
index a880f9ee..ac70088b 100644
--- a/CompilerUtils.h
+++ b/CompilerUtils.h
@@ -77,6 +77,8 @@ public:
);
/// Dynamic version of @see storeInMemory, expects the memory offset below the value on the stack
/// and also updates that. For arrays, only copies the data part.
+ /// @param _padToWordBoundaries if true, adds zeros to pad to multiple of 32 bytes. Array elements
+ /// are always padded (except for byte arrays), regardless of this parameter.
/// Stack pre: memory_offset value...
/// Stack post: (memory_offset+length)
void storeInMemoryDynamic(Type const& _type, bool _padToWordBoundaries = true);
@@ -99,6 +101,11 @@ public:
bool _copyDynamicDataInPlace = false
);
+ /// Uses a CALL to the identity contract to perform a memory-to-memory copy.
+ /// Stack pre: <size> <target> <source>
+ /// Stack post:
+ void memoryCopy();
+
/// Appends code for an implicit or explicit type conversion. This includes erasing higher
/// order bits (@see appendHighBitCleanup) when widening integer but also copy to memory
/// if a reference type is converted from calldata or storage to memory.
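memoryCopy expects <size> <target> <source> on the stack (source on top) and consumes all three. A hypothetical call-site helper, sketched only to show the convention; the free-standing function and its name are illustrative and not part of this change:

    // Illustrative helper: copy _size bytes from memory offset _source to memory
    // offset _target using the identity-precompile copy declared above.
    void appendMemoryCopy(CompilerContext& _context, u256 const& _target, u256 const& _source, u256 const& _size)
    {
        _context << _size << _target << _source; // stack: <size> <target> <source>
        CompilerUtils(_context).memoryCopy();    // consumes all three values
    }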
diff --git a/Types.cpp b/Types.cpp
index 10a59826..ab93839d 100644
--- a/Types.cpp
+++ b/Types.cpp
@@ -721,9 +721,13 @@ bool ArrayType::isImplicitlyConvertibleTo(const Type& _convertTo) const
}
else
{
- // Require that the base type is the same, not only convertible.
- // This disallows assignment of nested arrays from storage to memory for now.
- if (*getBaseType() != *convertTo.getBaseType())
+ // Conversion to storage pointer or to memory: we do not copy element-for-element here, so
+ // require that the base type is the same, not only convertible.
+ // This disallows assignment of nested dynamic arrays from storage to memory for now.
+ if (
+ *copyForLocationIfReference(location(), getBaseType()) !=
+ *copyForLocationIfReference(location(), convertTo.getBaseType())
+ )
return false;
if (isDynamicallySized() != convertTo.isDynamicallySized())
return false;