This commit is contained in:
lz_db
2025-11-16 12:31:03 +08:00
commit 0fab423a18
1451 changed files with 743213 additions and 0 deletions

View File

@@ -0,0 +1,7 @@
# Package initializer: pull every public name of each submodule into the
# package namespace.
from .abi import *
from .account import *
from .typing import *
from .utils import *
from .hexbytes import *
# NOTE(review): __all__ lists submodule *names*, not the names bound by the
# wildcard imports above, so `from package import *` re-imports the submodules
# themselves — confirm this is the intended public surface.
__all__ = [ 'account', 'typing', 'utils', 'abi', 'hexbytes' ]

View File

@@ -0,0 +1,16 @@
# Facade module: re-export the porcelain coding helpers bound in `.abi`.
from .abi import (
    decode,
    decode_abi,
    decode_single,
    encode,
    encode_abi,
    encode_single,
    is_encodable,
    is_encodable_type,
)
# This code from: https://github.com/ethereum/eth-abi/tree/v3.0.1
# Version marker identifying this as the ccxt-vendored copy of eth-abi.
__version__ = 'ccxt'
# Only the non-deprecated entry points are declared public.
__all__ = ['decode','encode']

View File

@@ -0,0 +1,19 @@
from .codec import (
    ABICodec,
)
from .registry import (
    registry,
)
# Module-level codec bound to the shared default registry; every name below
# is a bound method of this single instance.
default_codec = ABICodec(registry)
encode = default_codec.encode
encode_abi = default_codec.encode_abi  # deprecated
encode_single = default_codec.encode_single  # deprecated
decode = default_codec.decode
decode_abi = default_codec.decode_abi  # deprecated
decode_single = default_codec.decode_single  # deprecated
is_encodable = default_codec.is_encodable
is_encodable_type = default_codec.is_encodable_type

View File

@@ -0,0 +1,152 @@
import functools
from ..typing.abi import (
TypeStr,
)
from .grammar import (
BasicType,
TupleType,
normalize,
parse,
)
def parse_type_str(expected_base=None, with_arrlist=False):
    """
    Used by BaseCoder subclasses as a convenience for implementing the
    ``from_type_str`` method required by ``ABIRegistry``. Useful if normalizing
    then parsing a type string with an (optional) expected base is required in
    that method.

    :param expected_base: If given, the parsed type must be a ``BasicType``
        whose ``base`` equals this string (e.g. ``"uint"``).
    :param with_arrlist: If ``True``, the parsed type must carry an array
        dimension list; if ``False``, it must not.
    """
    def decorator(old_from_type_str):
        @functools.wraps(old_from_type_str)
        def new_from_type_str(cls, type_str, registry):
            normalized_type_str = normalize(type_str)
            abi_type = parse(normalized_type_str)
            type_str_repr = repr(type_str)
            if type_str != normalized_type_str:
                # Show both spellings in error messages when normalization
                # changed the incoming type string.
                type_str_repr = "{} (normalized to {})".format(
                    type_str_repr,
                    repr(normalized_type_str),
                )
            if expected_base is not None:
                # Only basic (non-tuple) types can be matched against a base.
                if not isinstance(abi_type, BasicType):
                    raise ValueError(
                        "Cannot create {} for non-basic type {}".format(
                            cls.__name__,
                            type_str_repr,
                        )
                    )
                if abi_type.base != expected_base:
                    raise ValueError(
                        "Cannot create {} for type {}: expected type with "
                        "base '{}'".format(
                            cls.__name__,
                            type_str_repr,
                            expected_base,
                        )
                    )
            if not with_arrlist and abi_type.arrlist is not None:
                raise ValueError(
                    "Cannot create {} for type {}: expected type with "
                    "no array dimension list".format(
                        cls.__name__,
                        type_str_repr,
                    )
                )
            if with_arrlist and abi_type.arrlist is None:
                raise ValueError(
                    "Cannot create {} for type {}: expected type with "
                    "array dimension list".format(
                        cls.__name__,
                        type_str_repr,
                    )
                )
            # Perform general validation of default solidity types
            abi_type.validate()
            # The wrapped function receives the parsed ABI type object rather
            # than the raw type string.
            return old_from_type_str(cls, abi_type, registry)
        return classmethod(new_from_type_str)
    return decorator
def parse_tuple_type_str(old_from_type_str):
    """
    Convenience decorator for ``from_type_str`` implementations on BaseCoder
    subclasses that expect a tuple type.  Normalizes and parses the incoming
    type string into a ``TupleType`` before delegating to the wrapped
    function, raising ``ValueError`` for any non-tuple type.
    """
    @functools.wraps(old_from_type_str)
    def new_from_type_str(cls, type_str, registry):
        canonical = normalize(type_str)
        parsed = parse(canonical)

        # Mention both spellings in errors when normalization changed the
        # incoming string.
        shown = repr(type_str)
        if canonical != type_str:
            shown = "{} (normalized to {})".format(shown, repr(canonical))

        if not isinstance(parsed, TupleType):
            raise ValueError(
                "Cannot create {} for non-tuple type {}".format(
                    cls.__name__,
                    shown,
                )
            )

        # General validation of default solidity types.
        parsed.validate()
        return old_from_type_str(cls, parsed, registry)
    return classmethod(new_from_type_str)
class BaseCoder:
    """
    Base class for all encoder and decoder classes.
    """
    # Coders for variable-length (head-tail encoded) data override this
    # with True.
    is_dynamic = False
    def __init__(self, **kwargs):
        cls = type(self)
        # Ensure no unrecognized kwargs were given
        for key, value in kwargs.items():
            if not hasattr(cls, key):
                raise AttributeError(
                    "Property {key} not found on {cls_name} class. "
                    "`{cls_name}.__init__` only accepts keyword arguments which are "
                    "present on the {cls_name} class.".format(
                        key=key,
                        cls_name=cls.__name__,
                    )
                )
            # Accepted kwargs shadow the class-level attribute defaults.
            setattr(self, key, value)
        # Validate given combination of kwargs
        self.validate()
    def validate(self):
        # Hook for subclasses to check that the configured attribute
        # combination is coherent; invoked at the end of __init__.
        pass
    @classmethod
    def from_type_str(
        cls, type_str: TypeStr, registry
    ) -> "BaseCoder":  # pragma: no cover
        """
        Used by :any:`ABIRegistry` to get an appropriate encoder or decoder
        instance for the given type string and type registry.
        """
        raise NotImplementedError("Must implement `from_type_str`")

View File

@@ -0,0 +1,217 @@
from typing import (
Any,
Iterable,
Tuple,
)
import warnings
from ..typing.abi import (
Decodable,
TypeStr,
)
from ..utils import (
is_bytes,
)
from .decoding import (
ContextFramesBytesIO,
TupleDecoder,
)
from .encoding import (
TupleEncoder,
)
from .exceptions import (
EncodingError,
)
from .registry import (
ABIRegistry,
)
class BaseABICoder:
    """
    Common base for the high-level ("porcelain") coding APIs.  Holds a
    reference to an :class:`~.registry.ABIRegistry` from which subclasses
    look up the concrete encoder/decoder for each type string.
    """
    def __init__(self, registry: ABIRegistry):
        """
        :param registry: The registry providing the encoders to be used when
            encoding values.
        """
        # Subclasses resolve per-type coders through this registry.
        self._registry = registry
class ABIEncoder(BaseABICoder):
    """
    Wraps a registry to provide last-mile encoding functionality.
    """
    def encode_single(self, typ: TypeStr, arg: Any) -> bytes:
        """
        Encodes the python value ``arg`` as a binary value of the ABI type
        ``typ``.

        .. deprecated:: Use :meth:`encode` instead.

        :param typ: The string representation of the ABI type that will be used
            for encoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``,
            etc.
        :param arg: The python value to be encoded.
        :returns: The binary representation of the python value ``arg`` as a
            value of the ABI type ``typ``.
        """
        warnings.warn(
            "abi.encode_single() and abi.encode_single_packed() are deprecated "
            "and will be removed in version 4.0.0 in favor of abi.encode() and "
            "abi.encode_packed(), respectively",
            category=DeprecationWarning,
        )
        encoder = self._registry.get_encoder(typ)
        return encoder(arg)
    def encode_abi(self, types: Iterable[TypeStr], args: Iterable[Any]) -> bytes:
        """
        Encodes the python values in ``args`` as a sequence of binary values of
        the ABI types in ``types`` via the head-tail mechanism.

        .. deprecated:: Use :meth:`encode` instead.

        :param types: An iterable of string representations of the ABI types
            that will be used for encoding e.g. ``('uint256', 'bytes[]',
            '(int,int)')``
        :param args: An iterable of python values to be encoded.
        :returns: The head-tail encoded binary representation of the python
            values in ``args`` as values of the ABI types in ``types``.
        """
        warnings.warn(
            "abi.encode_abi() and abi.encode_abi_packed() are deprecated and will be "
            "removed in version 4.0.0 in favor of abi.encode() and "
            "abi.encode_packed(), respectively",
            category=DeprecationWarning,
        )
        # Deprecated alias — delegates to encode().
        return self.encode(types, args)
    def encode(self, types, args):
        """
        Head-tail encodes the values in ``args`` as the ABI types in ``types``
        by wrapping the per-type encoders in a single TupleEncoder.
        """
        encoders = [self._registry.get_encoder(type_str) for type_str in types]
        encoder = TupleEncoder(encoders=encoders)
        return encoder(args)
    def is_encodable(self, typ: TypeStr, arg: Any) -> bool:
        """
        Determines if the python value ``arg`` is encodable as a value of the
        ABI type ``typ``.

        :param typ: A string representation for the ABI type against which the
            python value ``arg`` will be checked e.g. ``'uint256'``,
            ``'bytes[]'``, ``'(int,int)'``, etc.
        :param arg: The python value whose encodability should be checked.
        :returns: ``True`` if ``arg`` is encodable as a value of the ABI type
            ``typ``. Otherwise, ``False``.
        """
        encoder = self._registry.get_encoder(typ)
        try:
            encoder.validate_value(arg)
        except EncodingError:
            return False
        except AttributeError:
            # Encoder does not expose `validate_value`; fall back to a full
            # encode and treat an encoding failure as "not encodable".
            try:
                encoder(arg)
            except EncodingError:
                return False
        return True
    def is_encodable_type(self, typ: TypeStr) -> bool:
        """
        Returns ``True`` if values for the ABI type ``typ`` can be encoded by
        this codec.

        :param typ: A string representation for the ABI type that will be
            checked for encodability e.g. ``'uint256'``, ``'bytes[]'``,
            ``'(int,int)'``, etc.
        :returns: ``True`` if values for ``typ`` can be encoded by this codec.
            Otherwise, ``False``.
        """
        return self._registry.has_encoder(typ)
class ABIDecoder(BaseABICoder):
    """
    Wraps a registry to provide last-mile decoding functionality.
    """
    # Stream type used to wrap the raw bytes; provides the contextual-frame
    # tracking required for nested head-tail decoding.
    stream_class = ContextFramesBytesIO
    def decode_single(self, typ: TypeStr, data: Decodable) -> Any:
        """
        Decodes the binary value ``data`` of the ABI type ``typ`` into its
        equivalent python value.

        .. deprecated:: Use :meth:`decode` instead.

        :param typ: The string representation of the ABI type that will be used for
            decoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``, etc.
        :param data: The binary value to be decoded.
        :returns: The equivalent python value of the ABI value represented in
            ``data``.
        """
        warnings.warn(
            "abi.decode_single() is deprecated and will be removed in version 4.0.0 "
            "in favor of abi.decode()",
            category=DeprecationWarning,
        )
        if not is_bytes(data):
            raise TypeError(
                "The `data` value must be of bytes type. Got {0}".format(type(data))
            )
        decoder = self._registry.get_decoder(typ)
        stream = self.stream_class(data)
        return decoder(stream)
    def decode_abi(self, types: Iterable[TypeStr], data: Decodable) -> Tuple[Any, ...]:
        """
        Decodes the binary value ``data`` as a sequence of values of the ABI types
        in ``types`` via the head-tail mechanism into a tuple of equivalent python
        values.

        .. deprecated:: Use :meth:`decode` instead.

        :param types: An iterable of string representations of the ABI types that
            will be used for decoding e.g. ``('uint256', 'bytes[]', '(int,int)')``
        :param data: The binary value to be decoded.
        :returns: A tuple of equivalent python values for the ABI values
            represented in ``data``.
        """
        warnings.warn(
            "abi.decode_abi() is deprecated and will be removed in version 4.0.0 in "
            "favor of abi.decode()",
            category=DeprecationWarning,
        )
        # Deprecated alias — delegates to decode().
        return self.decode(types, data)
    def decode(self, types, data):
        """
        Head-tail decodes ``data`` as a tuple of the ABI types in ``types`` by
        wrapping the per-type decoders in a single TupleDecoder.
        """
        if not is_bytes(data):
            raise TypeError(
                f"The `data` value must be of bytes type. Got {type(data)}"
            )
        decoders = [self._registry.get_decoder(type_str) for type_str in types]
        decoder = TupleDecoder(decoders=decoders)
        stream = self.stream_class(data)
        return decoder(stream)
class ABICodec(ABIEncoder, ABIDecoder):
    """Full codec: combines ABIEncoder and ABIDecoder over a single registry."""
    pass

View File

@@ -0,0 +1,3 @@
# Powers of two commonly used for 256-bit integer arithmetic.
TT256 = 2**256  # 2**256: one past the maximum unsigned 256-bit value
TT256M1 = 2**256 - 1  # 2**256 - 1: mask with all 256 bits set
TT255 = 2**255  # 2**255: sign bit of a 256-bit two's-complement integer

View File

@@ -0,0 +1,565 @@
import abc
import decimal
import io
from typing import (
Any,
)
from ..utils import (
big_endian_to_int,
to_normalized_address,
to_tuple,
)
from .base import (
BaseCoder,
parse_tuple_type_str,
parse_type_str,
)
from .exceptions import (
DecodingError,
InsufficientDataBytes,
NonEmptyPaddingBytes,
)
from .utils.numeric import (
TEN,
abi_decimal_context,
ceil32,
)
class ContextFramesBytesIO(io.BytesIO):
    """
    A byte stream that tracks a stack of contextual offset "frames".

    Head-tail encoded ABI data stores offsets that are relative to the
    enclosing object's encoding, not to the start of the whole payload.
    To resolve such an offset during nested decoding, the decoder pushes a
    frame for each enclosing object; the sum of all pushed frame offsets
    gives the global position that frame-relative seeks are based on.

    Example: for type ``(int,(int,int[]))`` with value ``(1,(2,[3,3]))``,
    the inner tuple is dynamic, so its encoding lives in the outer tuple's
    tail section, and the ``int[]`` encoding lives in the inner tuple's own
    tail section.  The offset stored for ``[3,3]`` is relative to the inner
    tuple's frame; only by stacking the outer frame's offset on top of it
    can the decoder find the data's global position in the stream.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Stack of (frame offset, cursor position to restore on pop).
        self._frames = []
        # Sum of all frame offsets currently on the stack.
        self._total_offset = 0

    def seek_in_frame(self, pos, *args, **kwargs):
        """Seek to ``pos`` relative to the combined offset of all frames."""
        self.seek(pos + self._total_offset, *args, **kwargs)

    def push_frame(self, offset):
        """
        Push a frame with the given offset, remembering the current cursor
        position as the return point, then seek to the new frame's start.
        """
        self._frames.append((offset, self.tell()))
        self._total_offset += offset
        self.seek_in_frame(0)

    def pop_frame(self):
        """
        Pop the innermost frame and restore the cursor to the position that
        was current when the frame was pushed.
        """
        if not self._frames:
            raise IndexError("no frames to pop")
        offset, return_pos = self._frames.pop()
        self._total_offset -= offset
        self.seek(return_pos)
class BaseDecoder(BaseCoder, metaclass=abc.ABCMeta):
    """
    Base class for all decoder classes. Subclass this if you want to define a
    custom decoder class. Subclasses must also implement
    :any:`BaseCoder.from_type_str`.
    """
    @abc.abstractmethod
    def decode(self, stream: ContextFramesBytesIO) -> Any:  # pragma: no cover
        """
        Decodes the given stream of bytes into a python value. Should raise
        :any:`exceptions.DecodingError` if a python value cannot be decoded
        from the given byte stream.
        """
        pass
    def __call__(self, stream: ContextFramesBytesIO) -> Any:
        # Decoder instances are callable; calling delegates to decode().
        return self.decode(stream)
class HeadTailDecoder(BaseDecoder):
    """
    Decodes a dynamic element: reads a 32-byte offset from the head section,
    then decodes the actual value at that offset in the tail section using
    ``tail_decoder``.
    """
    is_dynamic = True
    # Decoder for the value found at the head-declared offset; must be set.
    tail_decoder = None
    def validate(self):
        super().validate()
        if self.tail_decoder is None:
            raise ValueError("No `tail_decoder` set")
    def decode(self, stream):
        # The head entry holds the tail value's offset within the current frame.
        start_pos = decode_uint_256(stream)
        stream.push_frame(start_pos)
        value = self.tail_decoder(stream)
        stream.pop_frame()
        return value
class TupleDecoder(BaseDecoder):
    """
    Decodes a fixed sequence of heterogeneous components as a python tuple.
    """
    # Per-component decoders; must be supplied as a kwarg.
    decoders = None
    def __init__(self, **kwargs):
        # BaseCoder.__init__ assigns `decoders` and runs validate() before
        # the wrapping below.
        super().__init__(**kwargs)
        # Dynamic components are accessed indirectly through the head-tail
        # mechanism, so wrap them in HeadTailDecoder.
        self.decoders = tuple(
            HeadTailDecoder(tail_decoder=d) if getattr(d, "is_dynamic", False) else d
            for d in self.decoders
        )
        # A tuple is dynamic iff any of its components is dynamic.
        self.is_dynamic = any(getattr(d, "is_dynamic", False) for d in self.decoders)
    def validate(self):
        super().validate()
        if self.decoders is None:
            raise ValueError("No `decoders` set")
    @to_tuple
    def decode(self, stream):
        # Components are decoded in declaration order from the head section.
        for decoder in self.decoders:
            yield decoder(stream)
    @parse_tuple_type_str
    def from_type_str(cls, abi_type, registry):
        decoders = tuple(
            registry.get_decoder(c.to_type_str()) for c in abi_type.components
        )
        return cls(decoders=decoders)
class SingleDecoder(BaseDecoder):
    """
    Template for decoders of single (non-composite) values: read raw bytes,
    split off padding, convert with ``decoder_fn``, then validate padding.
    """
    # Conversion function from raw data bytes to a python value; must be set.
    decoder_fn = None
    def validate(self):
        super().validate()
        if self.decoder_fn is None:
            raise ValueError("No `decoder_fn` set")
    def validate_padding_bytes(self, value, padding_bytes):
        raise NotImplementedError("Must be implemented by subclasses")
    def decode(self, stream):
        raw_data = self.read_data_from_stream(stream)
        data, padding_bytes = self.split_data_and_padding(raw_data)
        value = self.decoder_fn(data)
        self.validate_padding_bytes(value, padding_bytes)
        return value
    def read_data_from_stream(self, stream):
        raise NotImplementedError("Must be implemented by subclasses")
    def split_data_and_padding(self, raw_data):
        # Default: no padding; fixed-size subclasses override this.
        return raw_data, b""
class BaseArrayDecoder(BaseDecoder):
    """
    Common behavior for array decoders; dispatches to a sized or dynamic
    array decoder based on the type string's last array dimension.
    """
    # Decoder used for each array element; must be supplied as a kwarg.
    item_decoder = None
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Use a head-tail decoder to decode dynamic elements
        if self.item_decoder.is_dynamic:
            self.item_decoder = HeadTailDecoder(
                tail_decoder=self.item_decoder,
            )
    def validate(self):
        super().validate()
        if self.item_decoder is None:
            raise ValueError("No `item_decoder` set")
    @parse_type_str(with_arrlist=True)
    def from_type_str(cls, abi_type, registry):
        item_decoder = registry.get_decoder(abi_type.item_type.to_type_str())
        # Only the outermost (last) dimension matters at this level; inner
        # dimensions are handled by the item decoder.
        array_spec = abi_type.arrlist[-1]
        if len(array_spec) == 1:
            # If array dimension is fixed
            return SizedArrayDecoder(
                array_size=array_spec[0],
                item_decoder=item_decoder,
            )
        else:
            # If array dimension is dynamic
            return DynamicArrayDecoder(item_decoder=item_decoder)
class SizedArrayDecoder(BaseArrayDecoder):
    """
    Decodes a fixed-length array (e.g. ``uint256[3]``) as a python tuple.
    """
    # Number of elements in the array; supplied as a kwarg.
    array_size = None
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # A fixed-size array is dynamic iff its element type is dynamic.
        self.is_dynamic = self.item_decoder.is_dynamic
    @to_tuple
    def decode(self, stream):
        for _ in range(self.array_size):
            yield self.item_decoder(stream)
class DynamicArrayDecoder(BaseArrayDecoder):
    """
    Decodes a dynamically-sized array (e.g. ``uint256[]``): a 32-byte length
    prefix followed by the elements, yielded as a python tuple.
    """
    # Dynamic arrays are always dynamic, regardless of their elements
    is_dynamic = True
    @to_tuple
    def decode(self, stream):
        array_size = decode_uint_256(stream)
        # Elements are positioned relative to the end of the 32-byte length
        # prefix, hence the frame at offset 32.
        stream.push_frame(32)
        for _ in range(array_size):
            yield self.item_decoder(stream)
        stream.pop_frame()
class FixedByteSizeDecoder(SingleDecoder):
    """
    Decoder template for values stored in a fixed number of data bytes, of
    which ``value_bit_size`` bits are significant and the rest is padding.
    """
    decoder_fn = None
    # Number of significant bits of the decoded value; must be set.
    value_bit_size = None
    # Total bytes read from the stream per value; must be set.
    data_byte_size = None
    # True if the value occupies the trailing bytes (padding leads),
    # False if it occupies the leading bytes (padding trails).
    is_big_endian = None
    def validate(self):
        super().validate()
        if self.value_bit_size is None:
            raise ValueError("`value_bit_size` may not be None")
        if self.data_byte_size is None:
            raise ValueError("`data_byte_size` may not be None")
        if self.decoder_fn is None:
            raise ValueError("`decoder_fn` may not be None")
        if self.is_big_endian is None:
            raise ValueError("`is_big_endian` may not be None")
        if self.value_bit_size % 8 != 0:
            raise ValueError(
                "Invalid value bit size: {0}. Must be a multiple of 8".format(
                    self.value_bit_size,
                )
            )
        if self.value_bit_size > self.data_byte_size * 8:
            raise ValueError("Value byte size exceeds data size")
    def read_data_from_stream(self, stream):
        data = stream.read(self.data_byte_size)
        if len(data) != self.data_byte_size:
            raise InsufficientDataBytes(
                "Tried to read {0} bytes. Only got {1} bytes".format(
                    self.data_byte_size,
                    len(data),
                )
            )
        return data
    def split_data_and_padding(self, raw_data):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size
        if self.is_big_endian:
            # Big-endian: padding precedes the value bytes.
            padding_bytes = raw_data[:padding_size]
            data = raw_data[padding_size:]
        else:
            # Little-endian layout: value bytes first, padding after.
            data = raw_data[:value_byte_size]
            padding_bytes = raw_data[value_byte_size:]
        return data, padding_bytes
    def validate_padding_bytes(self, value, padding_bytes):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size
        # Canonical encodings zero-pad; anything else is malformed.
        if padding_bytes != b"\x00" * padding_size:
            raise NonEmptyPaddingBytes(
                "Padding bytes were not empty: {0}".format(repr(padding_bytes))
            )
    def _get_value_byte_size(self):
        value_byte_size = self.value_bit_size // 8
        return value_byte_size
class Fixed32ByteSizeDecoder(FixedByteSizeDecoder):
    """Fixed-size decoder reading one standard 32-byte ABI word per value."""
    data_byte_size = 32
class BooleanDecoder(Fixed32ByteSizeDecoder):
    """
    Decodes a 32-byte word into a python ``bool``.  Only the canonical
    encodings (final data byte ``0x00`` or ``0x01``) are accepted; anything
    else raises ``NonEmptyPaddingBytes``.
    """
    value_bit_size = 8
    is_big_endian = True

    @staticmethod
    def decoder_fn(data):
        # Accept only the two canonical single-byte encodings.
        if data == b"\x01":
            return True
        if data == b"\x00":
            return False
        raise NonEmptyPaddingBytes(
            "Boolean must be either 0x0 or 0x1. Got: {0}".format(repr(data))
        )

    @parse_type_str("bool")
    def from_type_str(cls, abi_type, registry):
        return cls()
class AddressDecoder(Fixed32ByteSizeDecoder):
    """
    Decodes a 32-byte word into a normalized 20-byte address string.
    """
    value_bit_size = 20 * 8
    is_big_endian = True
    decoder_fn = staticmethod(to_normalized_address)
    @parse_type_str("address")
    def from_type_str(cls, abi_type, registry):
        return cls()
#
# Unsigned Integer Decoders
#
class UnsignedIntegerDecoder(Fixed32ByteSizeDecoder):
    """Decodes big-endian ``uintN`` values from zero-padded 32-byte words."""
    decoder_fn = staticmethod(big_endian_to_int)
    is_big_endian = True
    @parse_type_str("uint")
    def from_type_str(cls, abi_type, registry):
        # abi_type.sub carries the bit width parsed from e.g. "uint256".
        return cls(value_bit_size=abi_type.sub)
# Shared instance used throughout this module to decode 32-byte offsets and
# lengths in head-tail encoded data.
decode_uint_256 = UnsignedIntegerDecoder(value_bit_size=256)
#
# Signed Integer Decoders
#
class SignedIntegerDecoder(Fixed32ByteSizeDecoder):
    """
    Decodes big-endian two's-complement ``intN`` values; padding must be
    sign-extended (0x00 for non-negative, 0xff for negative values).
    """
    is_big_endian = True
    def decoder_fn(self, data):
        value = big_endian_to_int(data)
        # Re-interpret as two's complement: values with the sign bit set
        # represent negatives.
        if value >= 2 ** (self.value_bit_size - 1):
            return value - 2**self.value_bit_size
        else:
            return value
    def validate_padding_bytes(self, value, padding_bytes):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size
        # Sign extension: padding mirrors the sign of the decoded value.
        if value >= 0:
            expected_padding_bytes = b"\x00" * padding_size
        else:
            expected_padding_bytes = b"\xff" * padding_size
        if padding_bytes != expected_padding_bytes:
            raise NonEmptyPaddingBytes(
                "Padding bytes were not empty: {0}".format(repr(padding_bytes))
            )
    @parse_type_str("int")
    def from_type_str(cls, abi_type, registry):
        # abi_type.sub carries the bit width parsed from e.g. "int128".
        return cls(value_bit_size=abi_type.sub)
#
# Bytes1..32
#
class BytesDecoder(Fixed32ByteSizeDecoder):
    """
    Decodes fixed-width ``bytesN`` (1 <= N <= 32) values; data bytes lead and
    zero padding trails, so this decoder is little-endian-layout.
    """
    is_big_endian = False
    @staticmethod
    def decoder_fn(data):
        # The value bytes are returned verbatim.
        return data
    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        # abi_type.sub is the byte width N from e.g. "bytes32".
        return cls(value_bit_size=abi_type.sub * 8)
class BaseFixedDecoder(Fixed32ByteSizeDecoder):
    """
    Shared configuration and validation for ``fixed``/``ufixed`` decoders:
    requires a fractional-digit count ``frac_places`` in the range (0, 80].
    """
    frac_places = None
    is_big_endian = True

    def validate(self):
        super().validate()
        if self.frac_places is None:
            raise ValueError("must specify `frac_places`")
        if not (0 < self.frac_places <= 80):
            raise ValueError("`frac_places` must be in range (0, 80]")
class UnsignedFixedDecoder(BaseFixedDecoder):
    """
    Decodes ``ufixedMxN`` values: the unsigned integer word divided by
    ``10**frac_places``, returned as a ``decimal.Decimal``.
    """
    def decoder_fn(self, data):
        value = big_endian_to_int(data)
        # Scale down within the shared ABI decimal context for consistent
        # precision.
        with decimal.localcontext(abi_decimal_context):
            decimal_value = decimal.Decimal(value) / TEN**self.frac_places
        return decimal_value
    @parse_type_str("ufixed")
    def from_type_str(cls, abi_type, registry):
        # abi_type.sub is the (bit width, fractional digits) pair.
        value_bit_size, frac_places = abi_type.sub
        return cls(value_bit_size=value_bit_size, frac_places=frac_places)
class SignedFixedDecoder(BaseFixedDecoder):
    """
    Decodes ``fixedMxN`` values: the two's-complement integer word divided by
    ``10**frac_places``, returned as a ``decimal.Decimal``.
    """
    def decoder_fn(self, data):
        value = big_endian_to_int(data)
        # Re-interpret as two's complement before scaling.
        if value >= 2 ** (self.value_bit_size - 1):
            signed_value = value - 2**self.value_bit_size
        else:
            signed_value = value
        with decimal.localcontext(abi_decimal_context):
            decimal_value = decimal.Decimal(signed_value) / TEN**self.frac_places
        return decimal_value
    # NOTE: identical to SignedIntegerDecoder.validate_padding_bytes — sign
    # extension applies to signed fixed-point padding as well.
    def validate_padding_bytes(self, value, padding_bytes):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size
        if value >= 0:
            expected_padding_bytes = b"\x00" * padding_size
        else:
            expected_padding_bytes = b"\xff" * padding_size
        if padding_bytes != expected_padding_bytes:
            raise NonEmptyPaddingBytes(
                "Padding bytes were not empty: {0}".format(repr(padding_bytes))
            )
    @parse_type_str("fixed")
    def from_type_str(cls, abi_type, registry):
        # abi_type.sub is the (bit width, fractional digits) pair.
        value_bit_size, frac_places = abi_type.sub
        return cls(value_bit_size=value_bit_size, frac_places=frac_places)
#
# String and Bytes
#
class ByteStringDecoder(SingleDecoder):
    """
    Decodes dynamic ``bytes`` values: a 32-byte length prefix followed by the
    data, zero-padded to a multiple of 32 bytes.
    """
    is_dynamic = True
    @staticmethod
    def decoder_fn(data):
        # The raw data bytes are returned verbatim.
        return data
    @staticmethod
    def read_data_from_stream(stream):
        data_length = decode_uint_256(stream)
        padded_length = ceil32(data_length)
        data = stream.read(padded_length)
        if len(data) < padded_length:
            raise InsufficientDataBytes(
                "Tried to read {0} bytes. Only got {1} bytes".format(
                    padded_length,
                    len(data),
                )
            )
        # Padding is validated here (rather than in validate_padding_bytes)
        # because the length prefix is only known inside this method.
        padding_bytes = data[data_length:]
        if padding_bytes != b"\x00" * (padded_length - data_length):
            raise NonEmptyPaddingBytes(
                "Padding bytes were not empty: {0}".format(repr(padding_bytes))
            )
        return data[:data_length]
    def validate_padding_bytes(self, value, padding_bytes):
        # Already validated in read_data_from_stream; nothing left to check.
        pass
    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        return cls()
class StringDecoder(ByteStringDecoder):
    """
    Decodes dynamic ``string`` values: same wire format as ``bytes``, but the
    payload is additionally decoded as UTF-8 text.
    """
    @parse_type_str("string")
    def from_type_str(cls, abi_type, registry):
        return cls()
    @staticmethod
    def decoder_fn(data):
        try:
            value = data.decode("utf-8")
        except UnicodeDecodeError as e:
            # Re-raise as a DecodingError, preserving the UnicodeDecodeError
            # constructor arguments plus an explanatory reason string.
            raise DecodingError(
                e.encoding,
                e.object,
                e.start,
                e.end,
                "The returned type for this function is string which is "
                "expected to be a UTF8 encoded string of text. The returned "
                "value could not be decoded as valid UTF8. This is indicative "
                "of a broken application which is using incorrect return types for "
                "binary data.",
            ) from e
        return value

View File

@@ -0,0 +1,720 @@
import abc
import codecs
import decimal
from itertools import (
accumulate,
)
from typing import (
Any,
Optional,
Type,
)
from ..utils import (
int_to_big_endian,
is_address,
is_boolean,
is_bytes,
is_integer,
is_list_like,
is_number,
is_text,
to_canonical_address,
)
from .base import (
BaseCoder,
parse_tuple_type_str,
parse_type_str,
)
from .exceptions import (
EncodingTypeError,
IllegalValue,
ValueOutOfBounds,
)
from .utils.numeric import (
TEN,
abi_decimal_context,
ceil32,
compute_signed_fixed_bounds,
compute_signed_integer_bounds,
compute_unsigned_fixed_bounds,
compute_unsigned_integer_bounds,
)
from .utils.padding import (
fpad,
zpad,
zpad_right,
)
from .utils.string import (
abbr,
)
class BaseEncoder(BaseCoder, metaclass=abc.ABCMeta):
    """
    Base class for all encoder classes. Subclass this if you want to define a
    custom encoder class. Subclasses must also implement
    :any:`BaseCoder.from_type_str`.
    """
    @abc.abstractmethod
    def encode(self, value: Any) -> bytes:  # pragma: no cover
        """
        Encodes the given value as a sequence of bytes. Should raise
        :any:`exceptions.EncodingError` if ``value`` cannot be encoded.
        """
        pass
    @abc.abstractmethod
    def validate_value(self, value: Any) -> None:  # pragma: no cover
        """
        Checks whether or not the given value can be encoded by this encoder.
        If the given value cannot be encoded, must raise
        :any:`exceptions.EncodingError`.
        """
        pass
    @classmethod
    def invalidate_value(
        cls,
        value: Any,
        exc: Type[Exception] = EncodingTypeError,
        msg: Optional[str] = None,
    ) -> None:
        """
        Throws a standard exception for when a value is not encodable by an
        encoder.

        :param value: The offending value; abbreviated in the message.
        :param exc: The exception class to raise.
        :param msg: Optional extra detail appended to the message.
        """
        raise exc(
            "Value `{rep}` of type {typ} cannot be encoded by {cls}{msg}".format(
                rep=abbr(value),
                typ=type(value),
                cls=cls.__name__,
                msg="" if msg is None else (": " + msg),
            )
        )
    def __call__(self, value: Any) -> bytes:
        # Encoder instances are callable; calling delegates to encode().
        return self.encode(value)
class TupleEncoder(BaseEncoder):
    """
    Head-tail encodes a fixed sequence of heterogeneous components.
    """
    # Per-component encoders; must be supplied as a kwarg.
    encoders = None
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # A tuple is dynamic iff any of its components is dynamic.
        self.is_dynamic = any(getattr(e, "is_dynamic", False) for e in self.encoders)
    def validate(self):
        super().validate()
        if self.encoders is None:
            raise ValueError("`encoders` may not be none")
    def validate_value(self, value):
        if not is_list_like(value):
            self.invalidate_value(
                value,
                msg="must be list-like object such as array or tuple",
            )
        if len(value) != len(self.encoders):
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg="value has {} items when {} were expected".format(
                    len(value),
                    len(self.encoders),
                ),
            )
        for item, encoder in zip(value, self.encoders):
            try:
                encoder.validate_value(item)
            except AttributeError:
                # Encoder lacks `validate_value`; a full encode serves as the
                # validation (it raises on invalid input).
                encoder(item)
    def encode(self, values):
        self.validate_value(values)
        raw_head_chunks = []
        tail_chunks = []
        for value, encoder in zip(values, self.encoders):
            if getattr(encoder, "is_dynamic", False):
                # Dynamic component: head holds a placeholder for the offset,
                # the encoded value goes in the tail.
                raw_head_chunks.append(None)
                tail_chunks.append(encoder(value))
            else:
                # Static component: encoded inline in the head section.
                raw_head_chunks.append(encoder(value))
                tail_chunks.append(b"")
        # Offset placeholders occupy 32 bytes (one word) in the head.
        head_length = sum(32 if item is None else len(item) for item in raw_head_chunks)
        # Each tail chunk starts where the previous chunks end.
        tail_offsets = (0,) + tuple(accumulate(map(len, tail_chunks[:-1])))
        head_chunks = tuple(
            encode_uint_256(head_length + offset) if chunk is None else chunk
            for chunk, offset in zip(raw_head_chunks, tail_offsets)
        )
        encoded_value = b"".join(head_chunks + tuple(tail_chunks))
        return encoded_value
    @parse_tuple_type_str
    def from_type_str(cls, abi_type, registry):
        encoders = tuple(
            registry.get_encoder(c.to_type_str()) for c in abi_type.components
        )
        return cls(encoders=encoders)
class FixedSizeEncoder(BaseEncoder):
    """
    Encoder template for values stored in a fixed number of data bytes, of
    which ``value_bit_size`` bits are significant and the rest is padding.
    """
    # Number of significant bits of the encoded value; must be set.
    value_bit_size = None
    # Total bytes emitted per value; must be set.
    data_byte_size = None
    # Conversion function from python value to raw bytes; must be set.
    encode_fn = None
    type_check_fn = None
    # True pads on the left (value trails), False pads on the right.
    is_big_endian = None
    def validate(self):
        super().validate()
        if self.value_bit_size is None:
            raise ValueError("`value_bit_size` may not be none")
        if self.data_byte_size is None:
            raise ValueError("`data_byte_size` may not be none")
        if self.encode_fn is None:
            raise ValueError("`encode_fn` may not be none")
        if self.is_big_endian is None:
            raise ValueError("`is_big_endian` may not be none")
        if self.value_bit_size % 8 != 0:
            raise ValueError(
                "Invalid value bit size: {0}. Must be a multiple of 8".format(
                    self.value_bit_size,
                )
            )
        if self.value_bit_size > self.data_byte_size * 8:
            raise ValueError("Value byte size exceeds data size")
    def validate_value(self, value):
        raise NotImplementedError("Must be implemented by subclasses")
    def encode(self, value):
        self.validate_value(value)
        base_encoded_value = self.encode_fn(value)
        if self.is_big_endian:
            padded_encoded_value = zpad(base_encoded_value, self.data_byte_size)
        else:
            padded_encoded_value = zpad_right(base_encoded_value, self.data_byte_size)
        return padded_encoded_value
class Fixed32ByteSizeEncoder(FixedSizeEncoder):
    """Fixed-size encoder emitting one standard 32-byte ABI word per value."""
    data_byte_size = 32
class BooleanEncoder(Fixed32ByteSizeEncoder):
    """
    Encodes a python ``bool`` as a single ``0x00``/``0x01`` byte, left-padded
    to the 32-byte word size by the fixed-size encoder machinery.
    """
    value_bit_size = 8
    is_big_endian = True

    @classmethod
    def validate_value(cls, value):
        if not is_boolean(value):
            cls.invalidate_value(value)

    @classmethod
    def encode_fn(cls, value):
        # validate_value guarantees a bool; anything else is a logic error.
        if value is False:
            return b"\x00"
        if value is True:
            return b"\x01"
        raise ValueError("Invariant")

    @parse_type_str("bool")
    def from_type_str(cls, abi_type, registry):
        return cls()
class PackedBooleanEncoder(BooleanEncoder):
    """Packed-mode boolean encoder: one byte, no word padding."""
    data_byte_size = 1
class NumberEncoder(Fixed32ByteSizeEncoder):
    """
    Encoder template for numeric values: type-checks, rejects illegal values,
    and range-checks against bounds derived from ``value_bit_size``.
    """
    is_big_endian = True
    # Callable mapping value_bit_size -> (lower, upper) bound; must be set.
    bounds_fn = None
    # Optional predicate flagging values that are never encodable (e.g. NaN).
    illegal_value_fn = None
    # Predicate accepting the python types this encoder handles; must be set.
    type_check_fn = None
    def validate(self):
        super().validate()
        if self.bounds_fn is None:
            raise ValueError("`bounds_fn` cannot be null")
        if self.type_check_fn is None:
            raise ValueError("`type_check_fn` cannot be null")
    def validate_value(self, value):
        if not self.type_check_fn(value):
            self.invalidate_value(value)
        illegal_value = self.illegal_value_fn is not None and self.illegal_value_fn(
            value
        )
        if illegal_value:
            self.invalidate_value(value, exc=IllegalValue)
        lower_bound, upper_bound = self.bounds_fn(self.value_bit_size)
        if value < lower_bound or value > upper_bound:
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg=(
                    "Cannot be encoded in {} bits. Must be bounded "
                    "between [{}, {}].".format(
                        self.value_bit_size,
                        lower_bound,
                        upper_bound,
                    )
                ),
            )
class UnsignedIntegerEncoder(NumberEncoder):
    """Encodes ``uintN`` values as big-endian, zero-padded 32-byte words."""
    encode_fn = staticmethod(int_to_big_endian)
    bounds_fn = staticmethod(compute_unsigned_integer_bounds)
    type_check_fn = staticmethod(is_integer)
    @parse_type_str("uint")
    def from_type_str(cls, abi_type, registry):
        # abi_type.sub carries the bit width parsed from e.g. "uint256".
        return cls(value_bit_size=abi_type.sub)
# Shared instance used in this module to encode 32-byte head-tail offsets.
encode_uint_256 = UnsignedIntegerEncoder(value_bit_size=256, data_byte_size=32)
class PackedUnsignedIntegerEncoder(UnsignedIntegerEncoder):
    """Packed-mode ``uintN`` encoder: exactly N/8 bytes, no word padding."""
    @parse_type_str("uint")
    def from_type_str(cls, abi_type, registry):
        return cls(
            value_bit_size=abi_type.sub,
            data_byte_size=abi_type.sub // 8,
        )
class SignedIntegerEncoder(NumberEncoder):
    """
    Encodes ``intN`` values in two's complement; negative values are padded
    with 0xff (sign extension) instead of zeros.
    """
    bounds_fn = staticmethod(compute_signed_integer_bounds)
    type_check_fn = staticmethod(is_integer)
    def encode_fn(self, value):
        # Modulo 2**N maps negatives to their two's-complement representation.
        return int_to_big_endian(value % (2**self.value_bit_size))
    def encode(self, value):
        self.validate_value(value)
        base_encoded_value = self.encode_fn(value)
        # Sign-extend: zero padding for non-negative, 0xff for negative.
        if value >= 0:
            padded_encoded_value = zpad(base_encoded_value, self.data_byte_size)
        else:
            padded_encoded_value = fpad(base_encoded_value, self.data_byte_size)
        return padded_encoded_value
    @parse_type_str("int")
    def from_type_str(cls, abi_type, registry):
        # abi_type.sub carries the bit width parsed from e.g. "int128".
        return cls(value_bit_size=abi_type.sub)
class PackedSignedIntegerEncoder(SignedIntegerEncoder):
    """Packed-mode ``intN`` encoder: exactly N/8 bytes, no word padding."""
    @parse_type_str("int")
    def from_type_str(cls, abi_type, registry):
        return cls(
            value_bit_size=abi_type.sub,
            data_byte_size=abi_type.sub // 8,
        )
class BaseFixedEncoder(NumberEncoder):
    """
    Shared validation for ``fixed``/``ufixed`` encoders: accepts non-float
    numbers representable with at most ``frac_places`` fractional digits.
    """
    # Number of decimal fractional digits; must be in (0, 80].
    frac_places = None
    @staticmethod
    def type_check_fn(value):
        # Floats are rejected to avoid silent precision loss; use Decimal.
        return is_number(value) and not isinstance(value, float)
    @staticmethod
    def illegal_value_fn(value):
        if isinstance(value, decimal.Decimal):
            return value.is_nan() or value.is_infinite()
        return False
    def validate_value(self, value):
        super().validate_value(value)
        with decimal.localcontext(abi_decimal_context):
            # Any remainder below the smallest representable step means the
            # value needs more fractional digits than the type allows.
            residue = value % (TEN**-self.frac_places)
        if residue > 0:
            self.invalidate_value(
                value,
                exc=IllegalValue,
                msg="residue {} outside allowed fractional precision of {}".format(
                    repr(residue),
                    self.frac_places,
                ),
            )
    def validate(self):
        super().validate()
        if self.frac_places is None:
            raise ValueError("must specify `frac_places`")
        if self.frac_places <= 0 or self.frac_places > 80:
            raise ValueError("`frac_places` must be in range (0, 80]")
class UnsignedFixedEncoder(BaseFixedEncoder):
    """Encoder for ``ufixed<M>x<N>`` values."""

    def bounds_fn(self, value_bit_size):
        return compute_unsigned_fixed_bounds(self.value_bit_size, self.frac_places)

    def encode_fn(self, value):
        # Scale by 10**frac_places, then encode the resulting integer.
        with decimal.localcontext(abi_decimal_context):
            scaled_value = value * TEN**self.frac_places
            integer_value = int(scaled_value)
        return int_to_big_endian(integer_value)

    @parse_type_str("ufixed")
    def from_type_str(cls, abi_type, registry):
        # ``sub`` for a fixed type is a (bits, frac_places) pair.
        value_bit_size, frac_places = abi_type.sub
        return cls(
            value_bit_size=value_bit_size,
            frac_places=frac_places,
        )
class PackedUnsignedFixedEncoder(UnsignedFixedEncoder):
    """Packed-mode ufixed encoder: output occupies exactly bits/8 bytes."""

    @parse_type_str("ufixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub
        return cls(
            value_bit_size=value_bit_size,
            data_byte_size=value_bit_size // 8,
            frac_places=frac_places,
        )
class SignedFixedEncoder(BaseFixedEncoder):
    """Encoder for ``fixed<M>x<N>`` values (two's-complement)."""

    def bounds_fn(self, value_bit_size):
        return compute_signed_fixed_bounds(self.value_bit_size, self.frac_places)

    def encode_fn(self, value):
        # Scale by 10**frac_places, then wrap into two's-complement range.
        with decimal.localcontext(abi_decimal_context):
            scaled_value = value * TEN**self.frac_places
            integer_value = int(scaled_value)
        unsigned_integer_value = integer_value % (2**self.value_bit_size)
        return int_to_big_endian(unsigned_integer_value)

    def encode(self, value):
        self.validate_value(value)
        base_encoded_value = self.encode_fn(value)
        if value >= 0:
            # Non-negative values are left-padded with zero bytes.
            padded_encoded_value = zpad(base_encoded_value, self.data_byte_size)
        else:
            # Negative values are left-padded with 0xff bytes (sign extension).
            padded_encoded_value = fpad(base_encoded_value, self.data_byte_size)
        return padded_encoded_value

    @parse_type_str("fixed")
    def from_type_str(cls, abi_type, registry):
        # ``sub`` for a fixed type is a (bits, frac_places) pair.
        value_bit_size, frac_places = abi_type.sub
        return cls(
            value_bit_size=value_bit_size,
            frac_places=frac_places,
        )
class PackedSignedFixedEncoder(SignedFixedEncoder):
    """Packed-mode fixed encoder: output occupies exactly bits/8 bytes."""

    @parse_type_str("fixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub
        return cls(
            value_bit_size=value_bit_size,
            data_byte_size=value_bit_size // 8,
            frac_places=frac_places,
        )
class AddressEncoder(Fixed32ByteSizeEncoder):
    """Encoder for 20-byte ``address`` values, zero-padded to 32 bytes."""

    value_bit_size = 20 * 8
    # Normalizes hex/checksummed input to its 20-byte canonical form.
    encode_fn = staticmethod(to_canonical_address)
    is_big_endian = True

    @classmethod
    def validate_value(cls, value):
        if not is_address(value):
            cls.invalidate_value(value)

    def validate(self):
        super().validate()
        if self.value_bit_size != 20 * 8:
            raise ValueError("Addresses must be 160 bits in length")

    @parse_type_str("address")
    def from_type_str(cls, abi_type, registry):
        return cls()
class PackedAddressEncoder(AddressEncoder):
    """Packed-mode address encoder: 20 bytes, no 32-byte padding."""

    data_byte_size = 20
class BytesEncoder(Fixed32ByteSizeEncoder):
    """Encoder for fixed-size ``bytes<M>`` values."""

    # bytesM values are right-padded, hence not big-endian.
    is_big_endian = False

    def validate_value(self, value):
        if not is_bytes(value):
            self.invalidate_value(value)
        byte_size = self.value_bit_size // 8
        # Shorter values are allowed (they get right-padded); longer are not.
        if len(value) > byte_size:
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg="exceeds total byte size for bytes{} encoding".format(byte_size),
            )

    @staticmethod
    def encode_fn(value):
        return value

    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        return cls(value_bit_size=abi_type.sub * 8)
class PackedBytesEncoder(BytesEncoder):
    """Packed-mode bytes<M> encoder: output occupies exactly M bytes."""

    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        return cls(
            value_bit_size=abi_type.sub * 8,
            data_byte_size=abi_type.sub,
        )
class ByteStringEncoder(BaseEncoder):
    """Encoder for dynamically sized ``bytes`` values (length-prefixed)."""

    is_dynamic = True

    @classmethod
    def validate_value(cls, value):
        if not is_bytes(value):
            cls.invalidate_value(value)

    @classmethod
    def encode(cls, value):
        cls.validate_value(value)
        if not value:
            # An empty byte string still occupies one zeroed 32-byte slot.
            padded_value = b"\x00" * 32
        else:
            # Right-pad the payload to the next 32-byte boundary.
            padded_value = zpad_right(value, ceil32(len(value)))
        # Prefix with the unpadded length encoded as a uint256.
        encoded_size = encode_uint_256(len(value))
        encoded_value = encoded_size + padded_value
        return encoded_value

    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        return cls()
class PackedByteStringEncoder(ByteStringEncoder):
    """Packed-mode bytes encoder: raw payload, no length prefix or padding."""

    is_dynamic = False

    @classmethod
    def encode(cls, value):
        cls.validate_value(value)
        return value
class TextStringEncoder(BaseEncoder):
    """Encoder for ``string`` values: UTF-8 encoded, then length-prefixed."""

    is_dynamic = True

    @classmethod
    def validate_value(cls, value):
        if not is_text(value):
            cls.invalidate_value(value)

    @classmethod
    def encode(cls, value):
        cls.validate_value(value)
        value_as_bytes = codecs.encode(value, "utf8")
        if not value_as_bytes:
            # An empty string still occupies one zeroed 32-byte slot.
            padded_value = b"\x00" * 32
        else:
            # Right-pad the UTF-8 payload to the next 32-byte boundary.
            padded_value = zpad_right(value_as_bytes, ceil32(len(value_as_bytes)))
        # The length prefix counts UTF-8 bytes, not characters.
        encoded_size = encode_uint_256(len(value_as_bytes))
        encoded_value = encoded_size + padded_value
        return encoded_value

    @parse_type_str("string")
    def from_type_str(cls, abi_type, registry):
        return cls()
class PackedTextStringEncoder(TextStringEncoder):
    """Packed-mode string encoder: raw UTF-8 bytes, no prefix or padding."""

    is_dynamic = False

    @classmethod
    def encode(cls, value):
        cls.validate_value(value)
        return codecs.encode(value, "utf8")
class BaseArrayEncoder(BaseEncoder):
    """Shared element-encoding logic for array types.

    Elements are encoded by ``item_encoder``; static items are concatenated
    directly while dynamic items use head/tail (offset-based) encoding.
    """

    # Encoder applied to each element; must be configured before use.
    item_encoder = None

    def validate(self):
        super().validate()
        if self.item_encoder is None:
            raise ValueError("`item_encoder` may not be none")

    def validate_value(self, value):
        if not is_list_like(value):
            self.invalidate_value(
                value,
                msg="must be list-like such as array or tuple",
            )
        for item in value:
            self.item_encoder.validate_value(item)

    def encode_elements(self, value):
        self.validate_value(value)
        item_encoder = self.item_encoder
        tail_chunks = tuple(item_encoder(i) for i in value)
        items_are_dynamic = getattr(item_encoder, "is_dynamic", False)
        if not items_are_dynamic:
            # Static items: simple in-order concatenation.
            return b"".join(tail_chunks)
        # Dynamic items: the head section holds one 32-byte offset per item,
        # each pointing past the head into the concatenated tail section.
        head_length = 32 * len(value)
        # Offsets are cumulative sizes of the preceding tail chunks.
        tail_offsets = (0,) + tuple(accumulate(map(len, tail_chunks[:-1])))
        head_chunks = tuple(
            encode_uint_256(head_length + offset) for offset in tail_offsets
        )
        return b"".join(head_chunks + tail_chunks)

    @parse_type_str(with_arrlist=True)
    def from_type_str(cls, abi_type, registry):
        item_encoder = registry.get_encoder(abi_type.item_type.to_type_str())
        # Only the outermost dimension matters here; inner dimensions are
        # handled by the recursively constructed item encoder.
        array_spec = abi_type.arrlist[-1]
        if len(array_spec) == 1:
            # If array dimension is fixed
            return SizedArrayEncoder(
                array_size=array_spec[0],
                item_encoder=item_encoder,
            )
        else:
            # If array dimension is dynamic
            return DynamicArrayEncoder(item_encoder=item_encoder)
class PackedArrayEncoder(BaseArrayEncoder):
    """Packed-mode array encoder: elements only, no length prefix."""

    # Expected element count for fixed-size arrays; ``None`` means dynamic.
    array_size = None

    def validate_value(self, value):
        super().validate_value(value)
        if self.array_size is not None and len(value) != self.array_size:
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg="value has {} items when {} were expected".format(
                    len(value),
                    self.array_size,
                ),
            )

    def encode(self, value):
        encoded_elements = self.encode_elements(value)
        return encoded_elements

    @parse_type_str(with_arrlist=True)
    def from_type_str(cls, abi_type, registry):
        item_encoder = registry.get_encoder(abi_type.item_type.to_type_str())
        array_spec = abi_type.arrlist[-1]
        if len(array_spec) == 1:
            # Fixed outer dimension.
            return cls(
                array_size=array_spec[0],
                item_encoder=item_encoder,
            )
        else:
            # Dynamic outer dimension.
            return cls(item_encoder=item_encoder)
class SizedArrayEncoder(BaseArrayEncoder):
    """Encoder for fixed-size arrays (``T[N]``); no length prefix."""

    array_size = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # A fixed-size array is dynamic iff its items are dynamic.
        self.is_dynamic = self.item_encoder.is_dynamic

    def validate(self):
        super().validate()
        if self.array_size is None:
            raise ValueError("`array_size` may not be none")

    def validate_value(self, value):
        super().validate_value(value)
        if len(value) != self.array_size:
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg="value has {} items when {} were expected".format(
                    len(value),
                    self.array_size,
                ),
            )

    def encode(self, value):
        encoded_elements = self.encode_elements(value)
        return encoded_elements
class DynamicArrayEncoder(BaseArrayEncoder):
    """Encoder for dynamically sized arrays (``T[]``): length prefix + elements."""

    is_dynamic = True

    def encode(self, value):
        # Element count (not byte length) encoded as a uint256 prefix.
        encoded_size = encode_uint_256(len(value))
        encoded_elements = self.encode_elements(value)
        encoded_value = encoded_size + encoded_elements
        return encoded_value

View File

@@ -0,0 +1,139 @@
from ...parsimonious import (
ParseError
)
class EncodingError(Exception):
    """
    Base exception for any error that occurs during encoding.
    """

    pass


class EncodingTypeError(EncodingError):
    """
    Raised when trying to encode a python value whose type is not supported for
    the output ABI type.
    """

    pass


class IllegalValue(EncodingError):
    """
    Raised when trying to encode a python value with the correct type but with
    a value that is not considered legal for the output ABI type.

    Example:

    .. code-block:: python

        fixed128x19_encoder(Decimal('NaN'))  # cannot encode NaN
    """

    pass


class ValueOutOfBounds(IllegalValue):
    """
    Raised when trying to encode a python value with the correct type but with
    a value that appears outside the range of valid values for the output ABI
    type.

    Example:

    .. code-block:: python

        ufixed8x1_encoder(Decimal('25.6'))  # out of bounds
    """

    pass
class DecodingError(Exception):
    """
    Base exception for any error that occurs during decoding.
    """

    pass


class InsufficientDataBytes(DecodingError):
    """
    Raised when there are insufficient data to decode a value for a given ABI
    type.
    """

    pass


class NonEmptyPaddingBytes(DecodingError):
    """
    Raised when the padding bytes of an ABI value are malformed.
    """

    pass


# NOTE: intentionally shadows the imported parsimonious ``ParseError`` so that
# callers of this package catch a single exception type with a friendlier
# message.
class ParseError(ParseError):
    """
    Raised when an ABI type string cannot be parsed.
    """

    def __str__(self):
        return "Parse error at '{}' (column {}) in type string '{}'".format(
            self.text[self.pos : self.pos + 5],
            self.column(),
            self.text,
        )


class ABITypeError(ValueError):
    """
    Raised when a parsed ABI type has inconsistent properties; for example,
    when trying to parse the type string ``'uint7'`` (which has a bit-width
    that is not congruent with zero modulo eight).
    """

    pass


class PredicateMappingError(Exception):
    """
    Raised when an error occurs in a registry's internal mapping.
    """

    pass


class NoEntriesFound(ValueError, PredicateMappingError):
    """
    Raised when no registration is found for a type string in a registry's
    internal mapping.

    .. warning::

        In a future version of ``eth-abi``, this error class will no longer
        inherit from ``ValueError``.
    """

    pass


class MultipleEntriesFound(ValueError, PredicateMappingError):
    """
    Raised when multiple registrations are found for a type string in a
    registry's internal mapping. This error is non-recoverable and indicates
    that a registry was configured incorrectly. Registrations are expected to
    cover completely distinct ranges of type strings.

    .. warning::

        In a future version of ``eth-abi``, this error class will no longer
        inherit from ``ValueError``.
    """

    pass

View File

@@ -0,0 +1,443 @@
import functools
import re
from ...parsimonious import (
expressions,
ParseError,
NodeVisitor,
Grammar
)
from .exceptions import (
ABITypeError,
ParseError,
)
grammar = Grammar(
r"""
type = tuple_type / basic_type
tuple_type = components arrlist?
components = non_zero_tuple / zero_tuple
non_zero_tuple = "(" type next_type* ")"
next_type = "," type
zero_tuple = "()"
basic_type = base sub? arrlist?
base = alphas
sub = two_size / digits
two_size = (digits "x" digits)
arrlist = (const_arr / dynam_arr)+
const_arr = "[" digits "]"
dynam_arr = "[]"
alphas = ~"[A-Za-z]+"
digits = ~"[1-9][0-9]*"
"""
)
# NOTE: intentionally shadows the imported parsimonious ``NodeVisitor``.
class NodeVisitor(NodeVisitor):
    """
    Parsimonious node visitor which performs both parsing of type strings and
    post-processing of parse trees. Parsing operations are cached.
    """

    grammar = grammar

    def visit_non_zero_tuple(self, node, visited_children):
        # Ignore left and right parens
        _, first, rest, _ = visited_children
        return (first,) + rest

    def visit_tuple_type(self, node, visited_children):
        components, arrlist = visited_children
        return TupleType(components, arrlist, node=node)

    def visit_next_type(self, node, visited_children):
        # Ignore comma
        _, abi_type = visited_children
        return abi_type

    def visit_zero_tuple(self, node, visited_children):
        return tuple()

    def visit_basic_type(self, node, visited_children):
        base, sub, arrlist = visited_children
        return BasicType(base, sub, arrlist, node=node)

    def visit_two_size(self, node, visited_children):
        # Ignore "x"
        first, _, second = visited_children
        return first, second

    def visit_const_arr(self, node, visited_children):
        # Ignore left and right brackets
        _, int_value, _ = visited_children
        return (int_value,)

    def visit_dynam_arr(self, node, visited_children):
        # A dynamic dimension is represented by an empty tuple.
        return tuple()

    def visit_alphas(self, node, visited_children):
        return node.text

    def visit_digits(self, node, visited_children):
        return int(node.text)

    def generic_visit(self, node, visited_children):
        if isinstance(node.expr, expressions.OneOf):
            # Unwrap value chosen from alternatives
            return visited_children[0]
        if isinstance(node.expr, expressions.Optional):
            # Unwrap optional value or return `None`
            if len(visited_children) != 0:
                return visited_children[0]
            return None
        return tuple(visited_children)

    # Cached on the singleton ``visitor`` instance below; type strings are
    # parsed at most once per process.
    @functools.lru_cache(maxsize=None)
    def parse(self, type_str):
        """
        Parses a type string into an appropriate instance of
        :class:`~eth_abi.grammar.ABIType`.  If a type string cannot be parsed,
        throws :class:`~eth_abi.exceptions.ParseError`.

        :param type_str: The type string to be parsed.
        :returns: An instance of :class:`~eth_abi.grammar.ABIType` containing
            information about the parsed type string.
        """
        if not isinstance(type_str, str):
            raise TypeError(
                "Can only parse string values: got {}".format(type(type_str))
            )
        try:
            return super().parse(type_str)
        except ParseError as e:
            # Re-raise as this package's ParseError (friendlier message).
            raise ParseError(e.text, e.pos, e.expr)
# Singleton visitor; its lru-cached ``parse`` backs the module-level ``parse``.
visitor = NodeVisitor()
class ABIType:
    """
    Base class for results of type string parsing operations.
    """

    __slots__ = ("arrlist", "node")

    def __init__(self, arrlist=None, node=None):
        self.arrlist = arrlist
        """
        The list of array dimensions for a parsed type.  Equal to ``None`` if
        type string has no array dimensions.
        """
        self.node = node
        """
        The parsimonious ``Node`` instance associated with this parsed type.
        Used to generate error messages for invalid types.
        """

    def __repr__(self):  # pragma: no cover
        return "<{} {}>".format(
            type(self).__qualname__,
            repr(self.to_type_str()),
        )

    def __eq__(self, other):
        # Two ABI types are equal if their string representations are equal
        return type(self) is type(other) and self.to_type_str() == other.to_type_str()

    def to_type_str(self):  # pragma: no cover
        """
        Returns the string representation of an ABI type.  This will be equal to
        the type string from which it was created.
        """
        raise NotImplementedError("Must implement `to_type_str`")

    @property
    def item_type(self):
        """
        If this type is an array type, equal to an appropriate
        :class:`~eth_abi.grammar.ABIType` instance for the array's items.
        """
        raise NotImplementedError("Must implement `item_type`")

    def validate(self):  # pragma: no cover
        """
        Validates the properties of an ABI type against the solidity ABI spec:

        https://solidity.readthedocs.io/en/develop/abi-spec.html

        Raises :class:`~eth_abi.exceptions.ABITypeError` if validation fails.
        """
        raise NotImplementedError("Must implement `validate`")

    def invalidate(self, error_msg):
        # Invalidates an ABI type with the given error message.  Expects that a
        # parsimonious node was provided from the original parsing operation
        # that yielded this type.
        node = self.node
        raise ABITypeError(
            "For '{comp_str}' type at column {col} "
            "in '{type_str}': {error_msg}".format(
                comp_str=node.text,
                col=node.start + 1,
                type_str=node.full_text,
                error_msg=error_msg,
            ),
        )

    @property
    def is_array(self):
        """
        Equal to ``True`` if a type is an array type (i.e. if it has an array
        dimension list).  Otherwise, equal to ``False``.
        """
        return self.arrlist is not None

    @property
    def is_dynamic(self):
        """
        Equal to ``True`` if a type has a dynamically sized encoding.
        Otherwise, equal to ``False``.
        """
        raise NotImplementedError("Must implement `is_dynamic`")

    @property
    def _has_dynamic_arrlist(self):
        # A dynamic dimension parses to an empty tuple (see visit_dynam_arr).
        return self.is_array and any(len(dim) == 0 for dim in self.arrlist)
class TupleType(ABIType):
    """
    Represents the result of parsing a tuple type string e.g. "(int,bool)".
    """

    __slots__ = ("components",)

    def __init__(self, components, arrlist=None, *, node=None):
        super().__init__(arrlist, node)
        self.components = components
        """
        A tuple of :class:`~eth_abi.grammar.ABIType` instances for each of the
        tuple type's components.
        """

    def to_type_str(self):
        arrlist = self.arrlist
        if isinstance(arrlist, tuple):
            # Render each dimension back as "[n]" or "[]".
            arrlist = "".join(repr(list(a)) for a in arrlist)
        else:
            arrlist = ""
        return "({}){}".format(
            ",".join(c.to_type_str() for c in self.components),
            arrlist,
        )

    @property
    def item_type(self):
        if not self.is_array:
            raise ValueError(
                "Cannot determine item type for non-array type '{}'".format(
                    self.to_type_str(),
                )
            )
        # Item type is the same tuple with the outermost dimension removed.
        return type(self)(
            self.components,
            self.arrlist[:-1] or None,
            node=self.node,
        )

    def validate(self):
        for c in self.components:
            c.validate()

    @property
    def is_dynamic(self):
        if self._has_dynamic_arrlist:
            return True
        return any(c.is_dynamic for c in self.components)
class BasicType(ABIType):
    """
    Represents the result of parsing a basic type string e.g. "uint", "address",
    "ufixed128x19[][2]".
    """

    __slots__ = ("base", "sub")

    def __init__(self, base, sub=None, arrlist=None, *, node=None):
        super().__init__(arrlist, node)
        self.base = base
        """The base of a basic type e.g. "uint" for "uint256" etc."""
        self.sub = sub
        """
        The sub type of a basic type e.g. ``256`` for "uint256" or ``(128, 18)``
        for "ufixed128x18" etc.  Equal to ``None`` if type string has no sub
        type.
        """

    def to_type_str(self):
        sub, arrlist = self.sub, self.arrlist
        if isinstance(sub, int):
            sub = str(sub)
        elif isinstance(sub, tuple):
            # Fixed-point sub is a (bits, frac_places) pair, e.g. "128x18".
            sub = "x".join(str(s) for s in sub)
        else:
            sub = ""
        if isinstance(arrlist, tuple):
            arrlist = "".join(repr(list(a)) for a in arrlist)
        else:
            arrlist = ""
        return self.base + sub + arrlist

    @property
    def item_type(self):
        if not self.is_array:
            raise ValueError(
                "Cannot determine item type for non-array type '{}'".format(
                    self.to_type_str(),
                )
            )
        # Item type is the same basic type with the outermost dimension removed.
        return type(self)(
            self.base,
            self.sub,
            self.arrlist[:-1] or None,
            node=self.node,
        )

    @property
    def is_dynamic(self):
        if self._has_dynamic_arrlist:
            return True
        if self.base == "string":
            return True
        if self.base == "bytes" and self.sub is None:
            return True
        return False

    def validate(self):
        base, sub = self.base, self.sub
        # Check validity of string type
        if base == "string":
            if sub is not None:
                self.invalidate("string type cannot have suffix")
        # Check validity of bytes type
        elif base == "bytes":
            if not (sub is None or isinstance(sub, int)):
                self.invalidate(
                    "bytes type must have either no suffix or a numerical suffix"
                )
            if isinstance(sub, int) and sub > 32:
                self.invalidate("maximum 32 bytes for fixed-length bytes")
        # Check validity of integer type
        elif base in ("int", "uint"):
            if not isinstance(sub, int):
                self.invalidate("integer type must have numerical suffix")
            if sub < 8 or 256 < sub:
                self.invalidate("integer size out of bounds (max 256 bits)")
            if sub % 8 != 0:
                self.invalidate("integer size must be multiple of 8")
        # Check validity of fixed type
        elif base in ("fixed", "ufixed"):
            if not isinstance(sub, tuple):
                self.invalidate(
                    "fixed type must have suffix of form <bits>x<exponent>, "
                    "e.g. 128x19",
                )
            bits, minus_e = sub
            if bits < 8 or 256 < bits:
                self.invalidate("fixed size out of bounds (max 256 bits)")
            if bits % 8 != 0:
                self.invalidate("fixed size must be multiple of 8")
            if minus_e < 1 or 80 < minus_e:
                self.invalidate(
                    "fixed exponent size out of bounds, {} must be in 1-80".format(
                        minus_e,
                    ),
                )
        # Check validity of hash type
        elif base == "hash":
            if not isinstance(sub, int):
                self.invalidate("hash type must have numerical suffix")
        # Check validity of address type
        elif base == "address":
            if sub is not None:
                self.invalidate("address cannot have suffix")
# Shorthand type names and their canonical expansions (per the ABI spec).
TYPE_ALIASES = {
    "int": "int256",
    "uint": "uint256",
    "fixed": "fixed128x18",
    "ufixed": "ufixed128x18",
    "function": "bytes24",
    "byte": "bytes1",
}

# Matches any alias as a whole word so e.g. "uint256" is left untouched.
TYPE_ALIAS_RE = re.compile(
    r"\b({})\b".format("|".join(re.escape(a) for a in TYPE_ALIASES.keys()))
)
def normalize(type_str):
    """
    Normalizes a type string into its canonical version e.g. the type string
    'int' becomes 'int256', etc.

    :param type_str: The type string to be normalized.
    :returns: The canonical version of the input type string.
    """
    def _expand_alias(match):
        # Replace a matched shorthand with its canonical expansion.
        return TYPE_ALIASES[match.group(0)]

    return TYPE_ALIAS_RE.sub(_expand_alias, type_str)
# Module-level parse entry point, bound to the cached singleton visitor.
parse = visitor.parse

View File

@@ -0,0 +1,13 @@
from .codec import (
    ABIEncoder,
)
from .registry import (
    registry_packed,
)

# Module-level encoder bound to the packed-mode registry; the names below are
# the package's public packed-encoding API.
default_encoder_packed = ABIEncoder(registry_packed)

encode_packed = default_encoder_packed.encode
is_encodable_packed = default_encoder_packed.is_encodable
encode_single_packed = default_encoder_packed.encode_single  # deprecated
encode_abi_packed = default_encoder_packed.encode_abi  # deprecated

View File

@@ -0,0 +1,643 @@
import abc
import copy
import functools
from typing import (
Any,
Callable,
Type,
Union,
)
from ..typing import (
abi,
)
from . import (
decoding,
encoding,
exceptions,
grammar,
)
from .base import (
BaseCoder,
)
from .exceptions import (
ABITypeError,
MultipleEntriesFound,
NoEntriesFound,
)
# Type aliases for what the registry accepts: a lookup is either a literal
# type string or a predicate over type strings; coders are either plain
# callables or coder classes constructed via ``from_type_str``.
Lookup = Union[abi.TypeStr, Callable[[abi.TypeStr], bool]]
EncoderCallable = Callable[[Any], bytes]
DecoderCallable = Callable[[decoding.ContextFramesBytesIO], Any]
Encoder = Union[EncoderCallable, Type[encoding.BaseEncoder]]
Decoder = Union[DecoderCallable, Type[decoding.BaseDecoder]]
class Copyable(abc.ABC):
    """Mixin routing both shallow and deep copies through a single ``copy``."""

    @abc.abstractmethod
    def copy(self):
        pass

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, *args):
        return self.copy()
class PredicateMapping(Copyable):
    """
    Acts as a mapping from predicate functions to values.  Values are retrieved
    when their corresponding predicate matches a given input.  Predicates can
    also be labeled to facilitate removal from the mapping.
    """

    def __init__(self, name):
        # Human-readable name, used only in error messages.
        self._name = name
        # predicate -> registered value
        self._values = {}
        # label -> predicate (for removal by name)
        self._labeled_predicates = {}

    def add(self, predicate, value, label=None):
        if predicate in self._values:
            raise ValueError(
                "Matcher {} already exists in {}".format(
                    repr(predicate),
                    self._name,
                )
            )
        if label is not None:
            if label in self._labeled_predicates:
                raise ValueError(
                    "Matcher {} with label '{}' already exists in {}".format(
                        repr(predicate),
                        label,
                        self._name,
                    ),
                )
            self._labeled_predicates[label] = predicate
        self._values[predicate] = value

    def find(self, type_str):
        """Return the single value whose predicate matches ``type_str``.

        Raises ``NoEntriesFound`` for zero matches and ``MultipleEntriesFound``
        for more than one (a registry misconfiguration).
        """
        results = tuple(
            (predicate, value)
            for predicate, value in self._values.items()
            if predicate(type_str)
        )
        if len(results) == 0:
            raise NoEntriesFound(
                "No matching entries for '{}' in {}".format(
                    type_str,
                    self._name,
                )
            )
        predicates, values = tuple(zip(*results))
        if len(results) > 1:
            predicate_reprs = ", ".join(map(repr, predicates))
            raise MultipleEntriesFound(
                f"Multiple matching entries for '{type_str}' in {self._name}: "
                f"{predicate_reprs}. This occurs when two registrations match the "
                "same type string. You may need to delete one of the "
                "registrations or modify its matching behavior to ensure it "
                'doesn\'t collide with other registrations. See the "Registry" '
                "documentation for more information."
            )
        return values[0]

    def remove_by_equality(self, predicate):
        # Delete the predicate mapping to the previously stored value
        try:
            del self._values[predicate]
        except KeyError:
            raise KeyError(
                "Matcher {} not found in {}".format(
                    repr(predicate),
                    self._name,
                )
            )
        # Delete any label which refers to this predicate
        try:
            label = self._label_for_predicate(predicate)
        except ValueError:
            pass
        else:
            del self._labeled_predicates[label]

    def _label_for_predicate(self, predicate):
        # Both keys and values in `_labeled_predicates` are unique since the
        # `add` method enforces this
        for key, value in self._labeled_predicates.items():
            if value is predicate:
                return key
        raise ValueError(
            "Matcher {} not referred to by any label in {}".format(
                repr(predicate),
                self._name,
            )
        )

    def remove_by_label(self, label):
        try:
            predicate = self._labeled_predicates[label]
        except KeyError:
            raise KeyError("Label '{}' not found in {}".format(label, self._name))
        del self._labeled_predicates[label]
        del self._values[predicate]

    def remove(self, predicate_or_label):
        # Dispatch on argument kind: callables are predicates, strings labels.
        if callable(predicate_or_label):
            self.remove_by_equality(predicate_or_label)
        elif isinstance(predicate_or_label, str):
            self.remove_by_label(predicate_or_label)
        else:
            raise TypeError(
                "Key to be removed must be callable or string: got {}".format(
                    type(predicate_or_label),
                )
            )

    def copy(self):
        # Shallow copy: internal dicts are duplicated, entries are shared.
        cpy = type(self)(self._name)
        cpy._values = copy.copy(self._values)
        cpy._labeled_predicates = copy.copy(self._labeled_predicates)
        return cpy
class Predicate:
    """
    Represents a predicate function to be used for type matching in
    ``ABIRegistry``.
    """

    __slots__ = tuple()

    def __call__(self, *args, **kwargs):  # pragma: no cover
        raise NotImplementedError("Must implement `__call__`")

    def __str__(self):  # pragma: no cover
        raise NotImplementedError("Must implement `__str__`")

    def __repr__(self):
        return "<{} {}>".format(type(self).__name__, self)

    def __iter__(self):
        # Yields the slot values; used to make equal predicates hash equal.
        for attr in self.__slots__:
            yield getattr(self, attr)

    def __hash__(self):
        return hash(tuple(self))

    def __eq__(self, other):
        return type(self) is type(other) and tuple(self) == tuple(other)
class Equals(Predicate):
    """
    A predicate that matches any input equal to `value`.
    """

    __slots__ = ("value",)

    def __init__(self, value):
        self.value = value

    def __call__(self, other):
        return self.value == other

    def __str__(self):
        return "(== {})".format(repr(self.value))
class BaseEquals(Predicate):
    """
    A predicate that matches a basic type string with a base component equal to
    `value` and no array component.  If `with_sub` is `True`, the type string
    must have a sub component to match.  If `with_sub` is `False`, the type
    string must *not* have a sub component to match.  If `with_sub` is None,
    the type string's sub component is ignored.
    """

    __slots__ = ("base", "with_sub")

    def __init__(self, base, *, with_sub=None):
        self.base = base
        self.with_sub = with_sub

    def __call__(self, type_str):
        try:
            abi_type = grammar.parse(type_str)
        except exceptions.ParseError:
            # Unparsable strings simply don't match.
            return False
        if isinstance(abi_type, grammar.BasicType):
            if abi_type.arrlist is not None:
                return False
            if self.with_sub is not None:
                if self.with_sub and abi_type.sub is None:
                    return False
                if not self.with_sub and abi_type.sub is not None:
                    return False
            return abi_type.base == self.base
        # We'd reach this point if `type_str` did not contain a basic type
        # e.g. if it contained a tuple type
        return False

    def __str__(self):
        return "(base == {}{})".format(
            repr(self.base),
            ""
            if self.with_sub is None
            else (" and sub is not None" if self.with_sub else " and sub is None"),
        )
def has_arrlist(type_str):
    """
    A predicate that matches a type string with an array dimension list.
    """
    try:
        parsed = grammar.parse(type_str)
    except exceptions.ParseError:
        # Unparsable strings simply don't match.
        return False
    else:
        return parsed.arrlist is not None
def is_base_tuple(type_str):
    """
    A predicate that matches a tuple type with no array dimension list.
    """
    try:
        parsed = grammar.parse(type_str)
    except exceptions.ParseError:
        # Unparsable strings simply don't match.
        return False
    else:
        if not isinstance(parsed, grammar.TupleType):
            return False
        return parsed.arrlist is None
def _clear_encoder_cache(old_method):
@functools.wraps(old_method)
def new_method(self, *args, **kwargs):
self.get_encoder.cache_clear()
return old_method(self, *args, **kwargs)
return new_method
def _clear_decoder_cache(old_method):
@functools.wraps(old_method)
def new_method(self, *args, **kwargs):
self.get_decoder.cache_clear()
return old_method(self, *args, **kwargs)
return new_method
class BaseRegistry:
    """Static helpers shared by registry implementations for manipulating
    ``PredicateMapping`` instances."""

    @staticmethod
    def _register(mapping, lookup, value, label=None):
        # Callables are used directly as predicates; plain strings become an
        # ``Equals`` predicate labeled with the string itself.
        if callable(lookup):
            mapping.add(lookup, value, label)
            return
        if isinstance(lookup, str):
            mapping.add(Equals(lookup), value, lookup)
            return
        raise TypeError(
            "Lookup must be a callable or a value of type `str`: got {}".format(
                repr(lookup),
            )
        )

    @staticmethod
    def _unregister(mapping, lookup_or_label):
        if callable(lookup_or_label):
            mapping.remove_by_equality(lookup_or_label)
            return
        if isinstance(lookup_or_label, str):
            mapping.remove_by_label(lookup_or_label)
            return
        raise TypeError(
            "Lookup/label must be a callable or a value of type `str`: got {}".format(
                repr(lookup_or_label),
            )
        )

    @staticmethod
    def _get_registration(mapping, type_str):
        try:
            value = mapping.find(type_str)
        except ValueError as e:
            if "No matching" in e.args[0]:
                # If no matches found, attempt to parse in case lack of matches
                # was due to unparsability
                grammar.parse(type_str)
            raise
        return value
class ABIRegistry(Copyable, BaseRegistry):
def __init__(self):
self._encoders = PredicateMapping("encoder registry")
self._decoders = PredicateMapping("decoder registry")
def _get_registration(self, mapping, type_str):
coder = super()._get_registration(mapping, type_str)
if isinstance(coder, type) and issubclass(coder, BaseCoder):
return coder.from_type_str(type_str, self)
return coder
@_clear_encoder_cache
def register_encoder(
self, lookup: Lookup, encoder: Encoder, label: str = None
) -> None:
"""
Registers the given ``encoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register(self._encoders, lookup, encoder, label=label)
@_clear_encoder_cache
def unregister_encoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters an encoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the encoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
encoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister(self._encoders, lookup_or_label)
@_clear_decoder_cache
def register_decoder(
self, lookup: Lookup, decoder: Decoder, label: str = None
) -> None:
"""
Registers the given ``decoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register(self._decoders, lookup, decoder, label=label)
@_clear_decoder_cache
def unregister_decoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters a decoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the decoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
decoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister(self._decoders, lookup_or_label)
def register(
self, lookup: Lookup, encoder: Encoder, decoder: Decoder, label: str = None
) -> None:
"""
Registers the given ``encoder`` and ``decoder`` under the given
``lookup``. A unique string label may be optionally provided that can
be used to refer to the registration by name.
:param lookup: A type string or type string matcher function
(predicate). When the registry is queried with a type string
``query`` to determine which encoder or decoder to use, ``query``
will be checked against every registration in the registry. If a
registration was created with a type string for ``lookup``, it will
be considered a match if ``lookup == query``. If a registration
was created with a matcher function for ``lookup``, it will be
considered a match if ``lookup(query) is True``. If more than one
registration is found to be a match, then an exception is raised.
:param encoder: An encoder callable or class to use if ``lookup``
matches a query. If ``encoder`` is a callable, it must accept a
python value and return a ``bytes`` value. If ``encoder`` is a
class, it must be a valid subclass of :any:`encoding.BaseEncoder`
and must also implement the :any:`from_type_str` method on
:any:`base.BaseCoder`.
:param decoder: A decoder callable or class to use if ``lookup``
matches a query. If ``decoder`` is a callable, it must accept a
stream-like object of bytes and return a python value. If
``decoder`` is a class, it must be a valid subclass of
:any:`decoding.BaseDecoder` and must also implement the
:any:`from_type_str` method on :any:`base.BaseCoder`.
:param label: An optional label that can be used to refer to this
registration by name. This label can be used to unregister an
entry in the registry via the :any:`unregister` method and its
variants.
"""
self.register_encoder(lookup, encoder, label=label)
self.register_decoder(lookup, decoder, label=label)
def unregister(self, label: str) -> None:
"""
Unregisters the entries in the encoder and decoder registries which
have the label ``label``.
"""
self.unregister_encoder(label)
self.unregister_decoder(label)
@functools.lru_cache(maxsize=None)
def get_encoder(self, type_str):
return self._get_registration(self._encoders, type_str)
def has_encoder(self, type_str: abi.TypeStr) -> bool:
"""
Returns ``True`` if an encoder is found for the given type string
``type_str``. Otherwise, returns ``False``. Raises
:class:`~eth_abi.exceptions.MultipleEntriesFound` if multiple encoders
are found.
"""
try:
self.get_encoder(type_str)
except (ABITypeError, NoEntriesFound):
return False
else:
return True
    @functools.lru_cache(maxsize=None)
    def get_decoder(self, type_str):
        # Resolve (and memoize) the decoder registered for `type_str`;
        # mirrors get_encoder above.
        return self._get_registration(self._decoders, type_str)
def copy(self):
"""
Copies a registry such that new registrations can be made or existing
registrations can be unregistered without affecting any instance from
which a copy was obtained. This is useful if an existing registry
fulfills most of a user's needs but requires one or two modifications.
In that case, a copy of that registry can be obtained and the necessary
changes made without affecting the original registry.
"""
cpy = type(self)()
cpy._encoders = copy.copy(self._encoders)
cpy._decoders = copy.copy(self._decoders)
return cpy
# Default codec registry used by `eth_abi.codec.ABICodec`.
registry = ABIRegistry()

# (lookup predicate, encoder, decoder, label) for every canonical ABI base
# type, plus the structural array/tuple fallbacks at the end. Each row is
# registered exactly as the individual `registry.register(...)` calls would.
_CODEC_TABLE = (
    (BaseEquals("uint"), encoding.UnsignedIntegerEncoder, decoding.UnsignedIntegerDecoder, "uint"),
    (BaseEquals("int"), encoding.SignedIntegerEncoder, decoding.SignedIntegerDecoder, "int"),
    (BaseEquals("address"), encoding.AddressEncoder, decoding.AddressDecoder, "address"),
    (BaseEquals("bool"), encoding.BooleanEncoder, decoding.BooleanDecoder, "bool"),
    (BaseEquals("ufixed"), encoding.UnsignedFixedEncoder, decoding.UnsignedFixedDecoder, "ufixed"),
    (BaseEquals("fixed"), encoding.SignedFixedEncoder, decoding.SignedFixedDecoder, "fixed"),
    (BaseEquals("bytes", with_sub=True), encoding.BytesEncoder, decoding.BytesDecoder, "bytes<M>"),
    (BaseEquals("bytes", with_sub=False), encoding.ByteStringEncoder, decoding.ByteStringDecoder, "bytes"),
    (BaseEquals("function"), encoding.BytesEncoder, decoding.BytesDecoder, "function"),
    (BaseEquals("string"), encoding.TextStringEncoder, decoding.StringDecoder, "string"),
    (has_arrlist, encoding.BaseArrayEncoder, decoding.BaseArrayDecoder, "has_arrlist"),
    (is_base_tuple, encoding.TupleEncoder, decoding.TupleDecoder, "is_base_tuple"),
)

for _lookup, _encoder, _decoder, _label in _CODEC_TABLE:
    registry.register(_lookup, _encoder, _decoder, label=_label)
# Registry for non-standard "packed" encoding (`abi.encodePacked` style).
# Only encoders are registered: packed data is ambiguous and not decodable.
registry_packed = ABIRegistry()

_PACKED_ENCODER_TABLE = (
    (BaseEquals("uint"), encoding.PackedUnsignedIntegerEncoder, "uint"),
    (BaseEquals("int"), encoding.PackedSignedIntegerEncoder, "int"),
    (BaseEquals("address"), encoding.PackedAddressEncoder, "address"),
    (BaseEquals("bool"), encoding.PackedBooleanEncoder, "bool"),
    (BaseEquals("ufixed"), encoding.PackedUnsignedFixedEncoder, "ufixed"),
    (BaseEquals("fixed"), encoding.PackedSignedFixedEncoder, "fixed"),
    (BaseEquals("bytes", with_sub=True), encoding.PackedBytesEncoder, "bytes<M>"),
    (BaseEquals("bytes", with_sub=False), encoding.PackedByteStringEncoder, "bytes"),
    (BaseEquals("function"), encoding.PackedBytesEncoder, "function"),
    (BaseEquals("string"), encoding.PackedTextStringEncoder, "string"),
    (has_arrlist, encoding.PackedArrayEncoder, "has_arrlist"),
    (is_base_tuple, encoding.TupleEncoder, "is_base_tuple"),
)

for _lookup, _encoder, _label in _PACKED_ENCODER_TABLE:
    registry_packed.register_encoder(_lookup, _encoder, label=_label)

View File

@@ -0,0 +1,3 @@
from ._strategies import ( # noqa: F401
get_abi_strategy,
)

View File

@@ -0,0 +1,230 @@
from typing import (
Callable,
Union,
)
from ...typing.abi import (
TypeStr,
)
from ..utils import (
to_checksum_address,
)
from hypothesis import (
strategies as st,
)
from ..grammar import (
ABIType,
normalize,
parse,
)
from ..registry import (
BaseEquals,
BaseRegistry,
Lookup,
PredicateMapping,
has_arrlist,
is_base_tuple,
)
from ..utils.numeric import (
scale_places,
)
# A factory receives the parsed ABI type plus the registry (for recursive
# element-type lookups) and returns a hypothesis strategy.
StrategyFactory = Callable[[ABIType, "StrategyRegistry"], st.SearchStrategy]
# Registrations may be either a ready-made strategy or such a factory.
StrategyRegistration = Union[st.SearchStrategy, StrategyFactory]
class StrategyRegistry(BaseRegistry):
    """Registry mapping ABI type strings to hypothesis search strategies."""

    def __init__(self):
        self._strategies = PredicateMapping("strategy registry")

    def register_strategy(
        self, lookup: Lookup, registration: StrategyRegistration, label: str = None
    ) -> None:
        # Delegate to the shared BaseRegistry plumbing.
        self._register(self._strategies, lookup, registration, label=label)

    def unregister_strategy(self, lookup_or_label: Lookup) -> None:
        self._unregister(self._strategies, lookup_or_label)

    def get_strategy(self, type_str: TypeStr) -> st.SearchStrategy:
        """
        Returns a hypothesis strategy for the given ABI type.
        :param type_str: The canonical string representation of the ABI type
            for which a hypothesis strategy should be returned.
        :returns: A hypothesis strategy for generating Python values that are
            encodable as values of the given ABI type.
        """
        registration = self._get_registration(self._strategies, type_str)
        if isinstance(registration, st.SearchStrategy):
            # Ready-made strategy: hand it back unchanged.
            return registration
        # Otherwise the registration is a factory: parse the normalized type
        # string and let the factory build an appropriate strategy.
        abi_type = parse(normalize(type_str))
        return registration(abi_type, self)
def get_uint_strategy(
    abi_type: ABIType, registry: StrategyRegistry
) -> st.SearchStrategy:
    """Integers drawn from the full inclusive range of ``uint<N>``."""
    num_bits = abi_type.sub
    upper = 2**num_bits - 1
    return st.integers(min_value=0, max_value=upper)
def get_int_strategy(
    abi_type: ABIType, registry: StrategyRegistry
) -> st.SearchStrategy:
    """Integers drawn from the full inclusive range of two's-complement ``int<N>``."""
    num_bits = abi_type.sub
    magnitude = 2 ** (num_bits - 1)
    return st.integers(min_value=-magnitude, max_value=magnitude - 1)
# 20 random bytes rendered as a checksummed address string.
address_strategy = st.binary(min_size=20, max_size=20).map(to_checksum_address)
bool_strategy = st.booleans()
def get_ufixed_strategy(
    abi_type: ABIType, registry: StrategyRegistry
) -> st.SearchStrategy:
    """Non-negative decimals spanning the range of ``ufixed<M>x<N>``."""
    num_bits, frac_places = abi_type.sub
    # Generate the raw integer grid, then shift the decimal point left.
    raw = st.decimals(min_value=0, max_value=2**num_bits - 1, places=0)
    return raw.map(scale_places(frac_places))
def get_fixed_strategy(
    abi_type: ABIType, registry: StrategyRegistry
) -> st.SearchStrategy:
    """Signed decimals spanning the range of ``fixed<M>x<N>``."""
    num_bits, frac_places = abi_type.sub
    magnitude = 2 ** (num_bits - 1)
    raw = st.decimals(min_value=-magnitude, max_value=magnitude - 1, places=0)
    return raw.map(scale_places(frac_places))
def get_bytes_strategy(
    abi_type: ABIType, registry: StrategyRegistry
) -> st.SearchStrategy:
    """Byte strings of exactly the fixed width declared by ``bytes<M>``."""
    width = abi_type.sub
    return st.binary(min_size=width, max_size=width)
# Dynamic `bytes` values, capped at 4 KiB to keep generated examples small.
bytes_strategy = st.binary(min_size=0, max_size=4096)
string_strategy = st.text()
def get_array_strategy(
    abi_type: ABIType, registry: StrategyRegistry
) -> st.SearchStrategy:
    """Lists of the element type, sized to match the ABI array dimension."""
    element_strategy = registry.get_strategy(abi_type.item_type.to_type_str())
    final_dim = abi_type.arrlist[-1]
    if final_dim:
        # Static array: force exactly `final_dim[0]` elements.
        size = final_dim[0]
        return st.lists(element_strategy, min_size=size, max_size=size)
    # Dynamic array: any length is encodable.
    return st.lists(element_strategy)
def get_tuple_strategy(
    abi_type: ABIType, registry: StrategyRegistry
) -> st.SearchStrategy:
    """Tuples whose slots follow the strategies of each component type."""
    member_strategies = (
        registry.get_strategy(component.to_type_str())
        for component in abi_type.components
    )
    return st.tuples(*member_strategies)
# Default strategy registry, mirroring the codec registry's lookup layout.
strategy_registry = StrategyRegistry()

_STRATEGY_TABLE = (
    (BaseEquals("uint"), get_uint_strategy, "uint"),
    (BaseEquals("int"), get_int_strategy, "int"),
    (BaseEquals("address", with_sub=False), address_strategy, "address"),
    (BaseEquals("bool", with_sub=False), bool_strategy, "bool"),
    (BaseEquals("ufixed"), get_ufixed_strategy, "ufixed"),
    (BaseEquals("fixed"), get_fixed_strategy, "fixed"),
    (BaseEquals("bytes", with_sub=True), get_bytes_strategy, "bytes<M>"),
    (BaseEquals("bytes", with_sub=False), bytes_strategy, "bytes"),
    (BaseEquals("function", with_sub=False), get_bytes_strategy, "function"),
    (BaseEquals("string", with_sub=False), string_strategy, "string"),
    (has_arrlist, get_array_strategy, "has_arrlist"),
    (is_base_tuple, get_tuple_strategy, "is_base_tuple"),
)

for _lookup, _registration, _label in _STRATEGY_TABLE:
    strategy_registry.register_strategy(_lookup, _registration, label=_label)

# Public entry point: map an ABI type string to a hypothesis strategy.
get_abi_strategy = strategy_registry.get_strategy

View File

@@ -0,0 +1,83 @@
import decimal
from typing import (
Callable,
Tuple,
)
# Precision used for all intermediate fixed-point decimal math; generous
# enough that fixed/ufixed values are represented exactly.
ABI_DECIMAL_PREC = 999
abi_decimal_context = decimal.Context(prec=ABI_DECIMAL_PREC)
# Shared Decimal constants to avoid re-parsing in hot paths.
ZERO = decimal.Decimal(0)
TEN = decimal.Decimal(10)
def ceil32(x: int) -> int:
    """Round ``x`` up to the nearest multiple of 32 (ABI word size)."""
    remainder = x % 32
    if remainder == 0:
        return x
    return x + (32 - remainder)
def compute_unsigned_integer_bounds(num_bits: int) -> Tuple[int, int]:
    """Return the inclusive (lower, upper) bounds of ``uint<num_bits>``."""
    upper = 2**num_bits - 1
    return 0, upper
def compute_signed_integer_bounds(num_bits: int) -> Tuple[int, int]:
    """Return the inclusive (lower, upper) bounds of two's-complement ``int<num_bits>``."""
    half_range = 2 ** (num_bits - 1)
    return -half_range, half_range - 1
def compute_unsigned_fixed_bounds(
    num_bits: int,
    frac_places: int,
) -> Tuple[decimal.Decimal, decimal.Decimal]:
    """Return the inclusive decimal bounds of ``ufixed<num_bits>x<frac_places>``."""
    _, int_upper = compute_unsigned_integer_bounds(num_bits)
    # Shift the integer ceiling right by `frac_places` decimal places under
    # the high-precision ABI context so the result is exact.
    with decimal.localcontext(abi_decimal_context):
        upper = decimal.Decimal(int_upper) * TEN**-frac_places
    return ZERO, upper
def compute_signed_fixed_bounds(
    num_bits: int,
    frac_places: int,
) -> Tuple[decimal.Decimal, decimal.Decimal]:
    """Return the inclusive decimal bounds of ``fixed<num_bits>x<frac_places>``."""
    int_lower, int_upper = compute_signed_integer_bounds(num_bits)
    with decimal.localcontext(abi_decimal_context):
        # Exact scaling under the high-precision ABI context.
        scaling = TEN**-frac_places
        return (
            decimal.Decimal(int_lower) * scaling,
            decimal.Decimal(int_upper) * scaling,
        )
def scale_places(places: int) -> Callable[[decimal.Decimal], decimal.Decimal]:
    """
    Returns a function that shifts the decimal point of decimal values to the
    right by ``places`` places.
    """
    if not isinstance(places, int):
        raise ValueError(
            f"Argument `places` must be int. Got value {places} "
            f"of type {type(places)}.",
        )
    # Compute the scaling factor once, under the high-precision ABI context.
    with decimal.localcontext(abi_decimal_context):
        factor = TEN**-places

    def scaled(x: decimal.Decimal) -> decimal.Decimal:
        with decimal.localcontext(abi_decimal_context):
            return x * factor

    # Give the closure a descriptive name for debugging/reprs.
    suffix = f"Eneg{places}" if places > 0 else f"Epos{-places}"
    scaled.__name__ = f"scale_by_{suffix}"
    scaled.__qualname__ = scaled.__name__
    return scaled

View File

@@ -0,0 +1,27 @@
from ...utils.toolz import (
curry,
)
@curry
def zpad(value: bytes, length: int) -> bytes:
    # Left-pad `value` with zero bytes to `length`; a no-op when `value`
    # is already `length` bytes or longer (rjust semantics).
    return value.rjust(length, b"\x00")
# Curried partial: left-pad to one 32-byte ABI word.
zpad32 = zpad(length=32)
@curry
def zpad_right(value: bytes, length: int) -> bytes:
    # Right-pad `value` with zero bytes to `length` (ljust semantics).
    return value.ljust(length, b"\x00")
zpad32_right = zpad_right(length=32)
@curry
def fpad(value: bytes, length: int) -> bytes:
    # Left-pad with 0xff bytes — used for sign-extending negative values.
    return value.rjust(length, b"\xff")
fpad32 = fpad(length=32)

View File

@@ -0,0 +1,19 @@
from typing import (
Any,
)
def abbr(value: Any, limit: int = 79) -> str:
    """
    Convert ``value`` to its ``repr`` and truncate the result to at most
    ``limit`` characters, replacing the tail with ``...`` when truncated.

    :raises ValueError: if truncation is needed but ``limit`` < 3 (there
        would be no room for the ellipsis).
    """
    text = repr(value)
    if len(text) <= limit:
        return text
    if limit < 3:
        raise ValueError("Abbreviation limit may not be less than 3")
    return text[: limit - 3] + "..."

View File

@@ -0,0 +1,3 @@
from .messages import *
__all__ = ["messages"]

View File

@@ -0,0 +1,4 @@
from .encoding_and_hashing import (
hash_domain,
hash_eip712_message,
)

View File

@@ -0,0 +1,239 @@
from typing import (
Any,
Dict,
List,
Tuple,
Union,
)
from ...abi import (
encode,
)
from ....keccak import (
SHA3 as keccak
)
from ...utils import (
to_bytes,
to_int,
)
from .helpers import (
EIP712_SOLIDITY_TYPES,
is_0x_prefixed_hexstr,
is_array_type,
parse_core_array_type,
parse_parent_array_type,
)
def get_primary_type(types: Dict[str, List[Dict[str, str]]]) -> str:
    """
    Return the single custom type that no *other* custom type references.

    Self-references do not count as dependencies. Raises ``ValueError``
    unless exactly one such root type exists.
    """
    custom = set(types)
    referenced = set()
    for owner, fields in types.items():
        for field in fields:
            core = parse_core_array_type(field["type"])
            # A type referencing itself is not a dependency edge.
            if core in custom and core != owner:
                referenced.add(core)
    roots = custom - referenced
    if len(roots) != 1:
        raise ValueError("Unable to determine primary type")
    return roots.pop()
def encode_field(
    types: Dict[str, List[Dict[str, str]]],
    name: str,
    type_: str,
    value: Any,
) -> Tuple[str, Union[int, bytes]]:
    """
    Resolve one struct field to an ``(abi_type, value)`` pair ready for ABI
    encoding, following the EIP-712 ``encodeData`` rules.

    Custom-typed and array values are recursively reduced to a ``bytes32``
    hash; atomic values are coerced per the branch comments below.

    :param types: mapping of custom type name -> list of field definitions.
    :param name: field name (used only in error messages).
    :param type_: the field's declared type string.
    :param value: raw caller-supplied value; ``None`` is permitted only for
        custom and dynamic (``string``/``bytes``) types.
    """
    if type_ in types.keys():
        # type is a custom type
        if value is None:
            # Missing struct: encode as 32 zero bytes.
            return ("bytes32", b"\x00" * 32)
        else:
            # Hash the nested struct's full encoding down to one word.
            return ("bytes32", keccak(encode_data(type_, types, value)))
    elif type_ in ["string", "bytes"] and value is None:
        return ("bytes32", b"")
    # None is allowed only for custom and dynamic types
    elif value is None:
        raise ValueError(f"Missing value for field `{name}` of type `{type_}`")
    elif is_array_type(type_):
        # handle array type with non-array value
        if not isinstance(value, list):
            raise ValueError(
                f"Invalid value for field `{name}` of type `{type_}`: "
                f"expected array, got `{value}` of type `{type(value)}`"
            )
        # Encode each element as the array's inner (parent) type, then hash
        # the concatenated encodings into a single word.
        parsed_type = parse_parent_array_type(type_)
        type_value_pairs = [
            encode_field(types, name, parsed_type, item) for item in value
        ]
        if not type_value_pairs:
            # the keccak hash of `encode((), ())`
            return (
                "bytes32",
                b"\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p", # noqa: E501
            )
        data_types, data_hashes = zip(*type_value_pairs)
        return ("bytes32", keccak(encode(data_types, data_hashes)))
    elif type_ == "bool":
        return (type_, bool(value))
    # all bytes types allow hexstr and str values
    elif type_.startswith("bytes"):
        if not isinstance(value, bytes):
            if is_0x_prefixed_hexstr(value):
                value = to_bytes(hexstr=value)
            elif isinstance(value, str):
                value = to_bytes(text=value)
            else:
                # Negative ints clamp to 0 before byte conversion.
                if isinstance(value, int) and value < 0:
                    value = 0
                value = to_bytes(value)
        return (
            # keccak hash if dynamic `bytes` type
            ("bytes32", keccak(value))
            if type_ == "bytes"
            # if fixed bytesXX type, do not hash
            else (type_, value)
        )
    elif type_ == "string":
        # Strings are always hashed; ints are converted via their bytes form.
        if isinstance(value, int):
            value = to_bytes(value)
        else:
            value = to_bytes(text=value)
        return ("bytes32", keccak(value))
    # allow string values for int and uint types
    elif type(value) == str and type_.startswith(("int", "uint")):
        if is_0x_prefixed_hexstr(value):
            return (type_, to_int(hexstr=value))
        else:
            return (type_, to_int(text=value))
    # All remaining atomic types pass through unchanged.
    return (type_, value)
def find_type_dependencies(type_, types, results=None):
    """
    Recursively collect the set of custom type names that ``type_`` depends
    on (including ``type_`` itself). ``results`` is the shared accumulator
    used across recursive calls.
    """
    collected = set() if results is None else results
    # a type must be a string
    if not isinstance(type_, str):
        raise ValueError(
            "Invalid find_type_dependencies input: expected string, got "
            f"`{type_}` of type `{type(type_)}`"
        )
    # get core type if it's an array type
    core_type = parse_core_array_type(type_)
    if core_type in EIP712_SOLIDITY_TYPES or core_type in collected:
        # Atomic Solidity types have no custom dependencies; already-seen
        # types stop the recursion (handles cycles).
        return collected
    if core_type not in types:
        raise ValueError(f"No definition of type `{core_type}`")
    collected.add(core_type)
    for member in types[core_type]:
        find_type_dependencies(member["type"], types, collected)
    return collected
def encode_type(type_: str, types: Dict[str, List[Dict[str, str]]]) -> str:
    """
    Build the canonical EIP-712 ``encodeType`` string: the primary type's
    signature first, followed by each dependency's signature in name order.
    """
    dependencies = find_type_dependencies(type_, types)
    dependencies.discard(type_)
    ordered = [type_] + sorted(dependencies)
    pieces = []
    for dep in ordered:
        fields = ",".join(
            f"{field['type']} {field['name']}" for field in types[dep]
        )
        pieces.append(f"{dep}({fields})")
    return "".join(pieces)
def hash_type(type_: str, types: Dict[str, List[Dict[str, str]]]) -> bytes:
    # keccak256 of the canonical encodeType string — the EIP-712 `typeHash`.
    return keccak(to_bytes(text=encode_type(type_, types)))
def encode_data(
    type_: str,
    types: Dict[str, List[Dict[str, str]]],
    data: Dict[str, Any],
) -> bytes:
    """
    ABI-encode ``data`` as an EIP-712 struct of type ``type_``: the struct's
    type hash followed by each field value encoded via ``encode_field``.

    :param type_: name of the struct type being encoded.
    :param types: mapping of custom type name -> list of field definitions.
    :param data: the field values; missing fields are passed as ``None``.
    """
    encoded_types: List[str] = ["bytes32"]
    encoded_values: List[Union[bytes, int]] = [hash_type(type_, types)]
    for field in types[type_]:
        # Renamed from `type` to avoid shadowing the builtin.
        field_type, field_value = encode_field(
            types, field["name"], field["type"], data.get(field["name"])
        )
        encoded_types.append(field_type)
        encoded_values.append(field_value)
    return encode(encoded_types, encoded_values)
def hash_struct(
    type_: str,
    types: Dict[str, List[Dict[str, str]]],
    data: Dict[str, Any],
) -> bytes:
    """keccak256 of the EIP-712 struct encoding of ``data`` (``hashStruct``)."""
    return keccak(encode_data(type_, types, data))
def hash_eip712_message(
    message_types: Dict[str, List[Dict[str, str]]],
    message_data: Dict[str, Any],
) -> bytes:
    """
    Hash a full EIP-712 message. Same result as ``hash_struct``, except the
    primary type is derived automatically from ``message_types``.
    """
    primary_type = get_primary_type(message_types)
    return hash_struct(primary_type, message_types, message_data)
def hash_domain(domain_data: Dict[str, Any]) -> bytes:
    """
    Hash the ``EIP712Domain`` struct built from ``domain_data``.

    Only the five standard domain keys are accepted; the struct's field
    order follows the canonical EIP712Domain ordering, not the order of the
    caller-supplied dict.
    """
    known_fields = {
        "name": {"name": "name", "type": "string"},
        "version": {"name": "version", "type": "string"},
        "chainId": {"name": "chainId", "type": "uint256"},
        "verifyingContract": {"name": "verifyingContract", "type": "address"},
        "salt": {"name": "salt", "type": "bytes32"},
    }
    for key in domain_data:
        if key not in known_fields:
            raise ValueError(f"Invalid domain key: `{key}`")
    domain_types = {
        "EIP712Domain": [
            definition
            for field_name, definition in known_fields.items()
            if field_name in domain_data
        ]
    }
    return hash_struct("EIP712Domain", domain_types, domain_data)

View File

@@ -0,0 +1,40 @@
from typing import (
Any,
)
from ...utils import (
is_hexstr,
)
def _get_eip712_solidity_types():
types = ["bool", "address", "string", "bytes", "uint", "int"]
ints = [f"int{(x + 1) * 8}" for x in range(32)]
uints = [f"uint{(x + 1) * 8}" for x in range(32)]
bytes_ = [f"bytes{x + 1}" for x in range(32)]
return types + ints + uints + bytes_
EIP712_SOLIDITY_TYPES = _get_eip712_solidity_types()
def is_array_type(type_: str) -> bool:
    """Return True when ``type_`` carries an array suffix (ends with ``]``)."""
    return type_[-1:] == "]"
def is_0x_prefixed_hexstr(value: Any) -> bool:
    """Return True only for hex strings that carry an explicit ``0x`` prefix."""
    if not is_hexstr(value):
        return False
    return value.startswith("0x")
def parse_core_array_type(type_: str) -> str:
    """Strip every array suffix: ``Person[][]`` -> ``Person``."""
    # Same guard the original delegated to is_array_type: only strings that
    # actually end with "]" are treated as arrays.
    if type_.endswith("]"):
        return type_[: type_.index("[")]
    return type_
def parse_parent_array_type(type_: str) -> str:
    """Strip only the outermost array suffix: ``Person[3][1]`` -> ``Person[3]``."""
    if type_.endswith("]"):
        return type_[: type_.rindex("[")]
    return type_

View File

@@ -0,0 +1,263 @@
from collections.abc import (
Mapping,
)
from typing import (
Any,
Dict,
NamedTuple,
)
import warnings
from ..typing import (
Address,
)
from ..utils.curried import (
ValidationError,
)
from ..hexbytes import (
HexBytes,
)
from .encode_typed_data.encoding_and_hashing import (
    get_primary_type,
    hash_domain,
    hash_eip712_message,
)
# watch for updates to signature format
class SignableMessage(NamedTuple):
    """
    A message compatible with EIP-191_ that is ready to be signed.
    The properties are components of an EIP-191_ signable message. Other message formats
    can be encoded into this format for easy signing. This data structure doesn't need
    to know about the original message format. For example, you can think of
    EIP-712 as compiling down to an EIP-191 message.
    In typical usage, you should never need to create these by hand. Instead, use
    one of the available encode_* methods in this module, like:
    - :meth:`encode_typed_data`
    .. _EIP-191: https://eips.ethereum.org/EIPS/eip-191
    """
    version: bytes  # must be length 1
    header: bytes  # aka "version specific data"
    body: bytes  # aka "data to sign"
def encode_typed_data(
    domain_data: Dict[str, Any] = None,
    message_types: Dict[str, Any] = None,
    message_data: Dict[str, Any] = None,
    full_message: Dict[str, Any] = None,
) -> SignableMessage:
    r"""
    Encode an EIP-712_ message in a manner compatible with other implementations
    in use, such as the Metamask and Ethers ``signTypedData`` functions.
    See the `EIP-712 spec <https://eips.ethereum.org/EIPS/eip-712>`_ for more information.
    You may supply the information to be encoded in one of two ways:
    As exactly three arguments:
    - ``domain_data``, a dict of the EIP-712 domain data
    - ``message_types``, a dict of custom types (do not include a ``EIP712Domain``
      key)
    - ``message_data``, a dict of the data to be signed
    Or as a single argument:
    - ``full_message``, a dict containing the following keys:
        - ``types``, a dict of custom types (may include a ``EIP712Domain`` key)
        - ``primaryType``, (optional) a string of the primary type of the message
        - ``domain``, a dict of the EIP-712 domain data
        - ``message``, a dict of the data to be signed
    .. WARNING:: Note that this code has not gone through an external audit, and
        the test cases are incomplete.
    Type Coercion:
    - For fixed-size bytes types, smaller values will be padded to fit in larger
      types, but values larger than the type will raise ``ValueOutOfBounds``.
      e.g., an 8-byte value will be padded to fit a ``bytes16`` type, but 16-byte
      value provided for a ``bytes8`` type will raise an error.
    - Fixed-size and dynamic ``bytes`` types will accept ``int``s. Any negative
      values will be converted to ``0`` before being converted to ``bytes``
    - ``int`` and ``uint`` types will also accept strings. If prefixed with ``"0x"``
      , the string will be interpreted as hex. Otherwise, it will be interpreted as
      decimal.
    Notable differences from ``signTypedData``:
    - Custom types that are not alphanumeric will encode differently.
    - Custom types that are used but not defined in ``types`` will not encode.
    :param domain_data: EIP712 domain data
    :param message_types: custom types used by the `value` data
    :param message_data: data to be signed
    :param full_message: a dict containing all data and types
    :returns: a ``SignableMessage``, an encoded message ready to be signed
    .. doctest:: python
        >>> # examples of basic usage
        >>> from eth_account import Account
        >>> from .messages import encode_typed_data
        >>> # 3-argument usage
        >>> # all domain properties are optional
        >>> domain_data = {
        ...     "name": "Ether Mail",
        ...     "version": "1",
        ...     "chainId": 1,
        ...     "verifyingContract": "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC",
        ...     "salt": b"decafbeef",
        ... }
        >>> # custom types
        >>> message_types = {
        ...     "Person": [
        ...         {"name": "name", "type": "string"},
        ...         {"name": "wallet", "type": "address"},
        ...     ],
        ...     "Mail": [
        ...         {"name": "from", "type": "Person"},
        ...         {"name": "to", "type": "Person"},
        ...         {"name": "contents", "type": "string"},
        ...     ],
        ... }
        >>> # the data to be signed
        >>> message_data = {
        ...     "from": {
        ...         "name": "Cow",
        ...         "wallet": "0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826",
        ...     },
        ...     "to": {
        ...         "name": "Bob",
        ...         "wallet": "0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB",
        ...     },
        ...     "contents": "Hello, Bob!",
        ... }
        >>> key = "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
        >>> signable_message = encode_typed_data(domain_data, message_types, message_data)
        >>> signed_message = Account.sign_message(signable_message, key)
        >>> signed_message.messageHash
        HexBytes('0xc5bb16ccc59ae9a3ad1cb8343d4e3351f057c994a97656e1aff8c134e56f7530')
        >>> # the message can be signed in one step using Account.sign_typed_data
        >>> signed_typed_data = Account.sign_typed_data(key, domain_data, message_types, message_data)
        >>> signed_typed_data == signed_message
        True
        >>> # 1-argument usage
        >>> # all domain properties are optional
        >>> full_message = {
        ...     "types": {
        ...         "EIP712Domain": [
        ...             {"name": "name", "type": "string"},
        ...             {"name": "version", "type": "string"},
        ...             {"name": "chainId", "type": "uint256"},
        ...             {"name": "verifyingContract", "type": "address"},
        ...             {"name": "salt", "type": "bytes32"},
        ...         ],
        ...         "Person": [
        ...             {"name": "name", "type": "string"},
        ...             {"name": "wallet", "type": "address"},
        ...         ],
        ...         "Mail": [
        ...             {"name": "from", "type": "Person"},
        ...             {"name": "to", "type": "Person"},
        ...             {"name": "contents", "type": "string"},
        ...         ],
        ...     },
        ...     "primaryType": "Mail",
        ...     "domain": {
        ...         "name": "Ether Mail",
        ...         "version": "1",
        ...         "chainId": 1,
        ...         "verifyingContract": "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC",
        ...         "salt": b"decafbeef"
        ...     },
        ...     "message": {
        ...         "from": {
        ...             "name": "Cow",
        ...             "wallet": "0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"
        ...         },
        ...         "to": {
        ...             "name": "Bob",
        ...             "wallet": "0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB"
        ...         },
        ...         "contents": "Hello, Bob!",
        ...     },
        ... }
        >>> signable_message_2 = encode_typed_data(full_message=full_message)
        >>> signed_message_2 = Account.sign_message(signable_message_2, key)
        >>> signed_message_2.messageHash
        HexBytes('0xc5bb16ccc59ae9a3ad1cb8343d4e3351f057c994a97656e1aff8c134e56f7530')
        >>> signed_message_2 == signed_message
        True
        >>> # the full_message can be signed in one step using Account.sign_typed_data
        >>> signed_typed_data_2 = Account.sign_typed_data(key, domain_data, message_types, message_data)
        >>> signed_typed_data_2 == signed_message_2
        True
    .. _EIP-712: https://eips.ethereum.org/EIPS/eip-712
    """  # noqa: E501
    if full_message is not None:
        # The two calling conventions are mutually exclusive.
        if (
            domain_data is not None
            or message_types is not None
            or message_data is not None
        ):
            raise ValueError(
                "You may supply either `full_message` as a single argument or "
                "`domain_data`, `message_types`, and `message_data` as three arguments,"
                " but not both."
            )
        # Copy so the caller's dicts are never mutated below.
        full_message_types = full_message["types"].copy()
        full_message_domain = full_message["domain"].copy()
        # If EIP712Domain types were provided, check that they match the domain data
        if "EIP712Domain" in full_message_types:
            domain_data_keys = list(full_message_domain.keys())
            domain_types_keys = [
                field["name"] for field in full_message_types["EIP712Domain"]
            ]
            if set(domain_data_keys) != (set(domain_types_keys)):
                raise ValidationError(
                    "The fields provided in `domain` do not match the fields provided"
                    " in `types.EIP712Domain`. The fields provided in `domain` were"
                    f" `{domain_data_keys}`, but the fields provided in "
                    f"`types.EIP712Domain` were `{domain_types_keys}`."
                )
        # The domain struct is hashed separately, so it must not be among
        # the message types when deriving the primary type below.
        full_message_types.pop("EIP712Domain", None)
        # If primaryType was provided, check that it matches the derived primaryType
        if "primaryType" in full_message:
            # `get_primary_type` comes from .encode_typed_data.encoding_and_hashing.
            derived_primary_type = get_primary_type(full_message_types)
            provided_primary_type = full_message["primaryType"]
            if derived_primary_type != provided_primary_type:
                raise ValidationError(
                    "The provided `primaryType` does not match the derived "
                    "`primaryType`. The provided `primaryType` was "
                    f"`{provided_primary_type}`, but the derived `primaryType` was "
                    f"`{derived_primary_type}`."
                )
        parsed_domain_data = full_message_domain
        parsed_message_types = full_message_types
        parsed_message_data = full_message["message"]
    else:
        parsed_domain_data = domain_data
        parsed_message_types = message_types
        parsed_message_data = message_data
    # EIP-191 version 0x01: header is the domain hash, body the message hash.
    return SignableMessage(
        HexBytes(b"\x01"),
        hash_domain(parsed_domain_data),
        hash_eip712_message(parsed_message_types, parsed_message_data),
    )

View File

@@ -0,0 +1,5 @@
from .main import (
HexBytes,
)
__all__ = ["HexBytes"]

View File

@@ -0,0 +1,54 @@
import binascii
from typing import (
Union,
)
def to_bytes(val: Union[bool, bytearray, bytes, int, str, memoryview]) -> bytes:
    """
    Equivalent to: `eth_utils.hexstr_if_str(eth_utils.to_bytes, val)` .
    Convert a hex string, integer, or bool, to a bytes representation.
    Alternatively, pass through bytes or bytearray as a bytes value.
    """
    if isinstance(val, bytes):
        return val
    if isinstance(val, (bytearray, memoryview)):
        return bytes(val)
    if isinstance(val, str):
        return hexstr_to_bytes(val)
    if isinstance(val, bool):
        # bool must be tested before int: isinstance(True, int) is True.
        return b"\x01" if val else b"\x00"
    if isinstance(val, int):
        if val < 0:
            raise ValueError(f"Cannot convert negative integer {val} to bytes")
        # Route through the hex-string path for big-endian conversion.
        return to_bytes(hex(val))
    raise TypeError(f"Cannot convert {val!r} of type {type(val)} to bytes")
def hexstr_to_bytes(hexstr: str) -> bytes:
    """
    Decode a hex string (with or without a ``0x``/``0X`` prefix) to bytes.

    Odd-length input is left-padded with a single ``0`` nibble.

    :raises ValueError: for non-ASCII input; ``binascii.Error`` (a
        ``ValueError`` subclass) propagates for non-hex ASCII characters.
    """
    if hexstr.startswith(("0x", "0X")):
        non_prefixed_hex = hexstr[2:]
    else:
        non_prefixed_hex = hexstr
    # if the hex string is odd-length, then left-pad it to an even length
    if len(non_prefixed_hex) % 2:
        padded_hex = "0" + non_prefixed_hex
    else:
        padded_hex = non_prefixed_hex
    try:
        ascii_hex = padded_hex.encode("ascii")
    except UnicodeEncodeError:
        # BUG FIX: str.encode raises UnicodeEncodeError; the original caught
        # UnicodeDecodeError, which can never occur here, so the friendly
        # error below was unreachable.
        raise ValueError(
            f"hex string {padded_hex} may only contain [0-9a-fA-F] characters"
        )
    else:
        return binascii.unhexlify(ascii_hex)

View File

@@ -0,0 +1,65 @@
import sys
from typing import (
TYPE_CHECKING,
Type,
Union,
cast,
overload,
)
from ._utils import (
to_bytes,
)
if TYPE_CHECKING:
from typing import (
SupportsIndex,
)
BytesLike = Union[bool, bytearray, bytes, int, str, memoryview]
class HexBytes(bytes):
    """
    HexBytes is a *very* thin wrapper around the python built-in :class:`bytes` class.
    It has these three changes:
    1. Accepts more initializing values, like hex strings, non-negative integers,
    and booleans
    2. Returns hex with prefix '0x' from :meth:`HexBytes.hex`
    3. The representation at console is in hex
    """
    def __new__(cls: Type[bytes], val: BytesLike) -> "HexBytes":
        # Normalize any BytesLike value through to_bytes before constructing
        # the underlying immutable bytes object.
        bytesval = to_bytes(val)
        return cast(HexBytes, super().__new__(cls, bytesval)) # type: ignore # https://github.com/python/typeshed/issues/2630 # noqa: E501
    def hex(
        self, sep: Union[str, bytes] = None, bytes_per_sep: "SupportsIndex" = 1
    ) -> str:
        """
        Output hex-encoded bytes, with an "0x" prefix.
        Everything following the "0x" is output exactly like :meth:`bytes.hex`.
        """
        # NOTE: `sep` and `bytes_per_sep` exist only for signature
        # compatibility with bytes.hex — they are not forwarded here.
        return "0x" + super().hex()
    @overload
    def __getitem__(self, key: "SupportsIndex") -> int:  # noqa: F811
        ...
    @overload  # noqa: F811
    def __getitem__(self, key: slice) -> "HexBytes":  # noqa: F811
        ...
    def __getitem__(  # noqa: F811
        self, key: Union["SupportsIndex", slice]
    ) -> Union[int, bytes, "HexBytes"]:
        result = super().__getitem__(key)
        # Slices come back as bytes (which has .hex) — rewrap them so slicing
        # a HexBytes yields a HexBytes; integer indexing returns int as-is.
        if hasattr(result, "hex"):
            return type(self)(result)
        else:
            return result
    def __repr__(self) -> str:
        # Console representation shows the 0x-prefixed hex form.
        return f"HexBytes({self.hex()!r})"

View File

@@ -0,0 +1,63 @@
from importlib.metadata import (
version as __version,
)
from .abi import (
Decodable,
TypeStr,
)
from .bls import (
BLSPrivateKey,
BLSPubkey,
BLSSignature,
)
from .discovery import (
NodeID,
)
from .encoding import (
HexStr,
Primitives,
)
from .enums import (
ForkName,
)
from .ethpm import (
URI,
ContractName,
Manifest,
)
from .evm import (
Address,
AnyAddress,
BlockIdentifier,
BlockNumber,
ChecksumAddress,
Hash32,
HexAddress,
)
from .networks import (
ChainId,
)
# Public re-export surface of the typing package.
__all__ = (
    "Decodable",
    "TypeStr",
    "BLSPrivateKey",
    "BLSPubkey",
    "BLSSignature",
    "NodeID",
    "HexStr",
    "Primitives",
    "ForkName",
    "ChainId",
    "URI",
    "ContractName",
    "Manifest",
    "Address",
    "AnyAddress",
    "BlockIdentifier",
    "BlockNumber",
    "ChecksumAddress",
    "Hash32",
    "HexAddress",
)

View File

@@ -0,0 +1,6 @@
from typing import (
Union,
)
# Canonical string representation of an ABI type, e.g. "uint256".
TypeStr = str
# Binary input accepted by ABI decoders.
Decodable = Union[bytes, bytearray]

View File

@@ -0,0 +1,7 @@
from typing import (
NewType,
)
# BLS12-381 key/signature types, distinguished at type-check time only.
BLSPubkey = NewType("BLSPubkey", bytes)  # bytes48
BLSPrivateKey = NewType("BLSPrivateKey", int)
BLSSignature = NewType("BLSSignature", bytes)  # bytes96

View File

@@ -0,0 +1,5 @@
from typing import (
NewType,
)
NodeID = NewType("NodeID", bytes)

View File

@@ -0,0 +1,7 @@
from typing import (
NewType,
Union,
)
# A string of hexadecimal characters (no guarantee of a 0x prefix).
HexStr = NewType("HexStr", str)
# Value kinds that encoding helpers accept directly.
Primitives = Union[bytes, int, bool]

View File

@@ -0,0 +1,17 @@
class ForkName:
Frontier = "Frontier"
Homestead = "Homestead"
EIP150 = "EIP150"
EIP158 = "EIP158"
Byzantium = "Byzantium"
Constantinople = "Constantinople"
Metropolis = "Metropolis"
ConstantinopleFix = "ConstantinopleFix"
Istanbul = "Istanbul"
Berlin = "Berlin"
London = "London"
ArrowGlacier = "ArrowGlacier"
GrayGlacier = "GrayGlacier"
Paris = "Paris"
Shanghai = "Shanghai"
Cancun = "Cancun"

View File

@@ -0,0 +1,9 @@
from typing import (
Any,
Dict,
NewType,
)
# ethPM package typing aliases.
ContractName = NewType("ContractName", str)
Manifest = NewType("Manifest", Dict[str, Any])
URI = NewType("URI", str)

View File

@@ -0,0 +1,20 @@
from typing import (
Literal,
NewType,
TypeVar,
Union,
)
from .encoding import (
HexStr,
)
# Core EVM-level value types.
Hash32 = NewType("Hash32", bytes)
BlockNumber = NewType("BlockNumber", int)
# Named block selectors accepted by JSON-RPC endpoints.
BlockParams = Literal["latest", "earliest", "pending", "safe", "finalized"]
BlockIdentifier = Union[BlockParams, BlockNumber, Hash32, HexStr, int]
Address = NewType("Address", bytes)  # canonical 20-byte form
HexAddress = NewType("HexAddress", HexStr)
ChecksumAddress = NewType("ChecksumAddress", HexAddress)  # EIP-55 mixed-case form
# An address in any of the supported representations.
AnyAddress = TypeVar("AnyAddress", Address, HexAddress, ChecksumAddress)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,115 @@
from importlib.metadata import (
version as __version,
)
# from .abi import (
# event_abi_to_log_topic,
# event_signature_to_log_topic,
# function_abi_to_4byte_selector,
# function_signature_to_4byte_selector,
# )
from .address import (
is_address,
is_binary_address,
is_canonical_address,
is_checksum_address,
is_checksum_formatted_address,
is_hex_address,
is_normalized_address,
is_same_address,
to_canonical_address,
to_checksum_address,
to_normalized_address,
)
from .applicators import (
apply_formatter_at_index,
apply_formatter_if,
apply_formatter_to_array,
apply_formatters_to_dict,
apply_formatters_to_sequence,
apply_key_map,
apply_one_of_formatters,
combine_argument_formatters,
)
from .conversions import (
hexstr_if_str,
text_if_str,
to_bytes,
to_hex,
to_int,
to_text,
)
from .currency import (
denoms,
from_wei,
to_wei,
)
from .decorators import (
combomethod,
replace_exceptions,
)
from .encoding import (
big_endian_to_int,
int_to_big_endian,
)
from .exceptions import (
ValidationError,
)
from .functional import (
apply_to_return_value,
flatten_return,
reversed_return,
sort_return,
to_dict,
to_list,
to_ordered_dict,
to_set,
to_tuple,
)
from .hexadecimal import (
add_0x_prefix,
decode_hex,
encode_hex,
is_0x_prefixed,
is_hex,
is_hexstr,
remove_0x_prefix,
)
from .humanize import (
humanize_bytes,
humanize_hash,
humanize_integer_sequence,
humanize_ipfs_uri,
humanize_seconds,
humanize_wei,
)
from .logging import (
DEBUG2_LEVEL_NUM,
ExtendedDebugLogger,
HasExtendedDebugLogger,
HasExtendedDebugLoggerMeta,
HasLogger,
HasLoggerMeta,
get_extended_debug_logger,
get_logger,
setup_DEBUG2_logging,
)
from .module_loading import (
import_string,
)
from .numeric import (
clamp,
)
from .types import (
is_boolean,
is_bytes,
is_dict,
is_integer,
is_list,
is_list_like,
is_null,
is_number,
is_string,
is_text,
is_tuple,
)

View File

@@ -0,0 +1,72 @@
from typing import (
Any,
Dict,
)
from .conversions import (
to_bytes
)
from ...keccak import (
SHA3 as keccak,
)
def collapse_if_tuple(abi: Dict[str, Any]) -> str:
    """Collapse a tuple-typed ABI entry into its parenthesized type string.

    Non-tuple types are returned unchanged, e.g. ``'address'``. Tuple types
    are rendered as a comma-joined list of their (recursively collapsed)
    component types, keeping any array dimension that follows the word
    ``"tuple"`` — the ABI spec allows ``""``, ``"[]"`` or ``"[k]"`` — so
    ``{'components': [...], 'type': 'tuple[2]'}`` becomes
    ``'(address,uint256)[2]'``.

    Raises:
        TypeError: when the ``'type'`` field is not a string.
    """
    type_field = abi["type"]
    if not isinstance(type_field, str):
        raise TypeError(
            f"The 'type' must be a string, but got {repr(type_field)} of type {type(type_field)}"
        )
    if not type_field.startswith("tuple"):
        return type_field
    inner = ",".join(collapse_if_tuple(component) for component in abi["components"])
    # Everything after the literal "tuple" is the array-dimension suffix.
    array_dim = type_field[5:]
    return f"({inner}){array_dim}"
def _abi_to_signature(abi: Dict[str, Any]) -> str:
    """Render the canonical ``name(type1,type2,...)`` signature for an ABI entry.

    Missing ``inputs`` is treated as an empty parameter list.
    """
    input_types = (collapse_if_tuple(abi_input) for abi_input in abi.get("inputs", []))
    return f"{abi['name']}({','.join(input_types)})"
def function_signature_to_4byte_selector(event_signature: str) -> bytes:
    # First 4 bytes of keccak256 of the signature: the calldata selector.
    # Spaces are stripped so "f(uint256, bool)" and "f(uint256,bool)" agree.
    # NOTE(review): the parameter is named `event_signature` even though this
    # handles function signatures; kept as-is for keyword-argument compatibility.
    return keccak(to_bytes(text=event_signature.replace(" ", "")))[:4]
def function_abi_to_4byte_selector(function_abi: Dict[str, Any]) -> bytes:
    # Build the canonical signature from the ABI dict, then take its selector.
    function_signature = _abi_to_signature(function_abi)
    return function_signature_to_4byte_selector(function_signature)
def event_signature_to_log_topic(event_signature: str) -> bytes:
    # Full 32-byte keccak256 hash of the signature: used as log topic0.
    return keccak(to_bytes(text=event_signature.replace(" ", "")))
def event_abi_to_log_topic(event_abi: Dict[str, Any]) -> bytes:
    # Build the canonical signature from the ABI dict, then hash it.
    event_signature = _abi_to_signature(event_abi)
    return event_signature_to_log_topic(event_signature)

View File

@@ -0,0 +1,171 @@
import re
from typing import (
Any,
Union,
cast,
)
from ..typing import (
Address,
AnyAddress,
ChecksumAddress,
HexAddress,
HexStr,
)
from .conversions import (
hexstr_if_str,
to_hex,
to_bytes,
)
from ...keccak import (
SHA3 as keccak,
)
from .hexadecimal import (
add_0x_prefix,
decode_hex,
encode_hex,
remove_0x_prefix,
)
from .types import (
is_bytes,
is_text,
)
# Exactly 40 hex digits, optionally "0x"-prefixed; ASCII-only, case-insensitive.
_HEX_ADDRESS_REGEXP = re.compile("(0x)?[0-9a-f]{40}", re.IGNORECASE | re.ASCII)
def is_hex_address(value: Any) -> bool:
    """
    Checks if the given string of text type is an address in hexadecimal encoded form.
    """
    return bool(is_text(value)) and _HEX_ADDRESS_REGEXP.fullmatch(value) is not None
def is_binary_address(value: Any) -> bool:
    """
    Checks if the given string is an address in raw bytes form.
    """
    # A canonical address is exactly 20 raw bytes.
    return bool(is_bytes(value)) and len(value) == 20
def is_address(value: Any) -> bool:
    """
    Is the given string an address in any of the known formats?
    """
    if is_hex_address(value):
        # Mixed-case hex must additionally carry a valid EIP-55 checksum;
        # all-lower / all-upper hex is accepted as-is.
        return is_checksum_address(value) if _is_checksum_formatted(value) else True
    return bool(is_binary_address(value))
def to_normalized_address(value: Union[AnyAddress, str, bytes]) -> HexAddress:
    """
    Converts an address to its normalized hexadecimal representation.

    Normalized form is lowercase, ``0x``-prefixed hex.
    """
    try:
        hex_address = hexstr_if_str(to_hex, value).lower()
    except AttributeError:
        # Non-string-like inputs have no usable hex conversion path.
        raise TypeError(f"Value must be any string, instead got type {type(value)}")
    if not is_address(hex_address):
        raise ValueError(
            f"Unknown format {repr(value)}, attempted to normalize to "
            f"{repr(hex_address)}"
        )
    return HexAddress(HexStr(hex_address))
def is_normalized_address(value: Any) -> bool:
    """
    Returns whether the provided value is an address in its normalized form.
    """
    if not is_address(value):
        return False
    return cast(bool, value == to_normalized_address(value))
def to_canonical_address(address: Union[AnyAddress, str, bytes]) -> Address:
    """
    Convert a valid address to its canonical form (20-length bytes).
    """
    # Normalize to lowercase 0x-hex first (which also validates), then decode
    # the hex body to raw bytes.
    return Address(decode_hex(to_normalized_address(address)))
def is_canonical_address(address: Any) -> bool:
    """
    Returns `True` if the `value` is an address in its canonical form.
    """
    # Canonical form is exactly 20 raw bytes that round-trip unchanged.
    if is_bytes(address) and len(address) == 20:
        return cast(bool, address == to_canonical_address(address))
    return False
def is_same_address(left: AnyAddress, right: AnyAddress) -> bool:
    """
    Checks if both addresses are same or not.

    Comparison happens on the normalized (lowercase hex) forms, so it is
    representation- and case-insensitive.
    """
    if is_address(left) and is_address(right):
        return bool(to_normalized_address(left) == to_normalized_address(right))
    raise ValueError("Both values must be valid addresses")
def to_checksum_address(value: Union[AnyAddress, str, bytes]) -> ChecksumAddress:
    """
    Makes a checksum address given a supported format.

    Implements EIP-55 mixed-case encoding: each hex digit of the lowercase
    address is uppercased exactly when the corresponding nibble of
    keccak256(lowercase_address_without_prefix) is greater than 7.
    """
    norm_address = to_normalized_address(value)
    address_hash = encode_hex(
        keccak(to_bytes(text=remove_0x_prefix(HexStr(norm_address))))
    )
    # Indices 2..41 skip the "0x" prefix, which both strings share.
    checksummed_chars = []
    for offset in range(2, 42):
        char = norm_address[offset]
        nibble = int(address_hash[offset], 16)
        checksummed_chars.append(char.upper() if nibble > 7 else char)
    checksum_address = add_0x_prefix(HexStr("".join(checksummed_chars)))
    return ChecksumAddress(HexAddress(checksum_address))
def is_checksum_address(value: Any) -> bool:
    """Return True when *value* is a correctly EIP-55 checksummed hex address."""
    if not is_text(value) or not is_hex_address(value):
        return False
    # Valid iff re-checksumming reproduces the input exactly.
    return cast(bool, value == to_checksum_address(value))
def _is_checksum_formatted(value: Any) -> bool:
    """Return True when the hex body is mixed-case (i.e. looks checksummed)."""
    body = remove_0x_prefix(value)
    # Purely numeric bodies carry no case information at all.
    if body.isnumeric():
        return False
    return not (body.islower() or body.isupper())
def is_checksum_formatted_address(value: Any) -> bool:
    # Hex address whose casing *looks* checksummed; does not verify the checksum.
    return is_hex_address(value) and _is_checksum_formatted(value)

View File

@@ -0,0 +1,151 @@
from typing import (
Any,
Callable,
Dict,
Generator,
List,
Tuple,
)
import warnings
from .decorators import (
return_arg_type,
)
from .functional import (
to_dict,
)
from .toolz import (
compose,
curry,
)
Formatters = Callable[[List[Any]], List[Any]]
@return_arg_type(2)
def apply_formatter_at_index(
    formatter: Callable[..., Any], at_index: int, value: List[Any]
) -> Generator[List[Any], None, None]:
    """Yield *value* with *formatter* applied to the element at *at_index* only.

    The ``return_arg_type`` decorator casts the generated sequence back to
    the concrete type of *value*.
    """
    if at_index + 1 > len(value):
        raise IndexError(
            f"Not enough values in iterable to apply formatter. Got: {len(value)}. "
            f"Need: {at_index + 1}"
        )
    for position, item in enumerate(value):
        yield formatter(item) if position == at_index else item
def combine_argument_formatters(*formatters: List[Callable[..., Any]]) -> Formatters:
    # Deprecated backward-compatibility shim; prefer
    # apply_formatters_to_sequence (see the warning text below).
    warnings.warn(
        DeprecationWarning(
            "combine_argument_formatters(formatter1, formatter2)([item1, item2])"
            "has been deprecated and will be removed in a subsequent major version "
            "release of the eth-utils library. Update your calls to use "
            "apply_formatters_to_sequence([formatter1, formatter2], [item1, item2]) "
            "instead."
        ),
        stacklevel=2,
    )
    # Compose one curried per-index formatter for each positional formatter.
    _formatter_at_index = curry(apply_formatter_at_index)
    return compose(  # type: ignore
        *(
            _formatter_at_index(formatter, index)
            for index, formatter in enumerate(formatters)
        )
    )
@return_arg_type(1)
def apply_formatters_to_sequence(
    formatters: List[Any], sequence: List[Any]
) -> Generator[List[Any], None, None]:
    """Apply each formatter to the item at the same position in *sequence*.

    The two inputs must have exactly the same length; a mismatch in either
    direction raises IndexError.
    """
    if len(formatters) > len(sequence):
        raise IndexError(
            f"Too many formatters for sequence: {len(formatters)} formatters for "
            f"{repr(sequence)}"
        )
    if len(formatters) < len(sequence):
        raise IndexError(
            f"Too few formatters for sequence: {len(formatters)} formatters for "
            f"{repr(sequence)}"
        )
    yield from (formatter(item) for formatter, item in zip(formatters, sequence))
def apply_formatter_if(
    condition: Callable[..., bool], formatter: Callable[..., Any], value: Any
) -> Any:
    """Return ``formatter(value)`` when ``condition(value)`` holds, else *value*."""
    return formatter(value) if condition(value) else value
@to_dict
def apply_formatters_to_dict(
    formatters: Dict[Any, Any], value: Dict[Any, Any]
) -> Generator[Tuple[Any, Any], None, None]:
    """Apply per-key formatters to *value*; keys without a formatter pass through.

    ValueError/TypeError raised by a formatter are re-raised with the
    offending key and value named in the message (original preserved as
    ``__cause__``).
    """
    for key, item in value.items():
        if key in formatters:
            try:
                yield key, formatters[key](item)
            except ValueError as exc:
                new_error_message = (
                    f"Could not format invalid value {repr(item)} as field {repr(key)}"
                )
                raise ValueError(new_error_message) from exc
            except TypeError as exc:
                new_error_message = (
                    f"Could not format invalid type {repr(item)} as field {repr(key)}"
                )
                raise TypeError(new_error_message) from exc
        else:
            yield key, item
@return_arg_type(1)
def apply_formatter_to_array(
    formatter: Callable[..., Any], value: List[Any]
) -> Generator[List[Any], None, None]:
    """Yield ``formatter(item)`` for every item of *value* (type preserved by decorator)."""
    yield from map(formatter, value)
def apply_one_of_formatters(
    formatter_condition_pairs: Tuple[Tuple[Callable[..., Any], Callable[..., Any]]],
    value: Any,
) -> Any:
    """Apply the formatter of the first ``(condition, formatter)`` pair that matches.

    Raises:
        ValueError: when no condition accepts *value*.
    """
    for condition, formatter in formatter_condition_pairs:
        if condition(value):
            return formatter(value)
    raise ValueError(
        "The provided value did not satisfy any of the formatter conditions"
    )
@to_dict
def apply_key_map(
    key_mappings: Dict[Any, Any], value: Dict[Any, Any]
) -> Generator[Tuple[Any, Any], None, None]:
    """Rename keys of *value* according to *key_mappings*; unmapped keys pass through.

    Raises KeyError when a rename target collides with an existing, unmapped
    key of *value* (which would silently drop an entry).
    """
    # Conflicts: keys of `value` that are NOT being renamed themselves but
    # that some other key would be renamed onto.
    key_conflicts = (
        set(value.keys())
        .difference(key_mappings.keys())
        .intersection(v for k, v in key_mappings.items() if v in value)
    )
    if key_conflicts:
        raise KeyError(
            f"Could not apply key map due to conflicting key(s): {key_conflicts}"
        )
    for key, item in value.items():
        if key in key_mappings:
            yield key_mappings[key], item
        else:
            yield key, item

View File

@@ -0,0 +1,190 @@
from typing import (
Callable,
Optional,
TypeVar,
Union,
cast,
)
from ..typing import (
HexStr,
Primitives,
)
from .decorators import (
validate_conversion_arguments,
)
from .encoding import (
big_endian_to_int,
int_to_big_endian,
)
from .hexadecimal import (
add_0x_prefix,
decode_hex,
encode_hex,
is_hexstr,
remove_0x_prefix,
)
from .types import (
is_boolean,
is_integer,
is_string,
)
T = TypeVar("T")
@validate_conversion_arguments
def to_hex(
    primitive: Optional[Primitives] = None,
    hexstr: Optional[HexStr] = None,
    text: Optional[str] = None,
) -> HexStr:
    """
    Auto converts any supported value into its hex representation.
    Trims leading zeros, as defined in:
    https://github.com/ethereum/wiki/wiki/JSON-RPC#hex-value-encoding
    """
    if hexstr is not None:
        return add_0x_prefix(HexStr(hexstr.lower()))
    if text is not None:
        return encode_hex(text.encode("utf-8"))
    if is_boolean(primitive):
        return HexStr("0x1") if primitive else HexStr("0x0")
    if isinstance(primitive, (bytes, bytearray)):
        return encode_hex(primitive)
    elif is_string(primitive):
        # Bare strings are ambiguous (hex vs text); force a keyword choice.
        raise TypeError(
            "Unsupported type: The primitive argument must be one of: bytes,"
            "bytearray, int or bool and not str"
        )
    if is_integer(primitive):
        # hex() keeps the sign and trims leading zeros per the JSON-RPC spec.
        return HexStr(hex(cast(int, primitive)))
    raise TypeError(
        f"Unsupported type: '{repr(type(primitive))}'. Must be one of: bool, str, "
        "bytes, bytearray or int."
    )
@validate_conversion_arguments
def to_int(
    primitive: Optional[Primitives] = None,
    hexstr: Optional[HexStr] = None,
    text: Optional[str] = None,
) -> int:
    """
    Converts value to its integer representation.
    Values are converted this way:
    * primitive:
    * bytes, bytearrays: big-endian integer
    * bool: True => 1, False => 0
    * hexstr: interpret hex as integer
    * text: interpret as string of digits, like '12' => 12
    """
    if hexstr is not None:
        return int(hexstr, 16)
    elif text is not None:
        return int(text)
    elif isinstance(primitive, (bytes, bytearray)):
        return big_endian_to_int(primitive)
    elif isinstance(primitive, str):
        # Bare strings are ambiguous (hex vs decimal); force a keyword choice.
        raise TypeError("Pass in strings with keyword hexstr or text")
    elif isinstance(primitive, (int, bool)):
        return int(primitive)
    else:
        raise TypeError(
            "Invalid type. Expected one of int/bool/str/bytes/bytearray. Got "
            f"{type(primitive)}"
        )
@validate_conversion_arguments
def to_bytes(
    primitive: Optional[Primitives] = None,
    hexstr: Optional[HexStr] = None,
    text: Optional[str] = None,
) -> bytes:
    """
    Converts value to its bytes representation.

    * bool -> single byte ``b"\\x01"`` / ``b"\\x00"``
    * int -> minimal big-endian encoding (round-tripped via hex)
    * hexstr -> decoded bytes (odd-length hex is left-padded with a "0")
    * text -> UTF-8 encoded bytes
    """
    if is_boolean(primitive):
        return b"\x01" if primitive else b"\x00"
    elif isinstance(primitive, bytearray):
        return bytes(primitive)
    elif isinstance(primitive, bytes):
        return primitive
    elif is_integer(primitive):
        # Reuse to_hex so the int is encoded big-endian with no zero padding.
        return to_bytes(hexstr=to_hex(primitive))
    elif hexstr is not None:
        if len(hexstr) % 2:
            # Pad odd-length hex to a whole number of bytes before decoding.
            hexstr = cast(HexStr, "0x0" + remove_0x_prefix(hexstr))
        return decode_hex(hexstr)
    elif text is not None:
        return text.encode("utf-8")
    raise TypeError(
        "expected a bool, int, byte or bytearray in first arg, "
        "or keyword of hexstr or text"
    )
@validate_conversion_arguments
def to_text(
    primitive: Optional[Primitives] = None,
    hexstr: Optional[HexStr] = None,
    text: Optional[str] = None,
) -> str:
    """
    Converts value to its text (unicode) representation.

    Bytes-like values are decoded as UTF-8. A bare ``str`` primitive is
    treated as a hex string; ints are converted via their big-endian bytes.
    """
    if hexstr is not None:
        return to_bytes(hexstr=hexstr).decode("utf-8")
    elif text is not None:
        return text
    elif isinstance(primitive, str):
        # A plain str primitive is assumed to be hex-encoded.
        return to_text(hexstr=primitive)
    elif isinstance(primitive, (bytes, bytearray)):
        return primitive.decode("utf-8")
    elif is_integer(primitive):
        byte_encoding = int_to_big_endian(cast(int, primitive))
        return to_text(byte_encoding)
    raise TypeError("Expected an int, bytes, bytearray or hexstr.")
def text_if_str(
    to_type: Callable[..., T], text_or_primitive: Union[bytes, int, str]
) -> T:
    """
    Convert to a type, assuming that strings can be only unicode text (not a hexstr).
    :param to_type function: takes the arguments (primitive, hexstr=hexstr, text=text),
    eg~ to_bytes, to_text, to_hex, to_int, etc
    :param text_or_primitive bytes, str, int: value to convert
    """
    # str inputs go through the text= keyword; everything else is the primitive.
    if isinstance(text_or_primitive, str):
        return to_type(text=text_or_primitive)
    return to_type(text_or_primitive)
def hexstr_if_str(
    to_type: Callable[..., T], hexstr_or_primitive: Union[bytes, int, str]
) -> T:
    """
    Convert to a type, assuming that strings can be only hexstr (not unicode text).
    :param to_type function: takes the arguments (primitive, hexstr=hexstr, text=text),
    eg~ to_bytes, to_text, to_hex, to_int, etc
    :param hexstr_or_primitive bytes, str, int: value to convert
    """
    if isinstance(hexstr_or_primitive, str):
        # Reject non-hex strings early; "" and "0x" fall through as valid
        # because their unprefixed body is empty (falsy).
        if remove_0x_prefix(HexStr(hexstr_or_primitive)) and not is_hexstr(
            hexstr_or_primitive
        ):
            raise ValueError(
                "when sending a str, it must be a hex string. "
                f"Got: {repr(hexstr_or_primitive)}"
            )
        return to_type(hexstr=hexstr_or_primitive)
    else:
        return to_type(hexstr_or_primitive)

View File

@@ -0,0 +1,107 @@
import decimal
from decimal import (
localcontext,
)
from typing import (
Union,
)
from .types import (
is_integer,
is_string,
)
from .units import (
units,
)
class denoms:
    """Wei value of each named ether denomination (from the ``units`` table).

    The lowercase class name is intentional: the class is used as a plain
    namespace, e.g. ``denoms.gwei``.
    """
    wei = int(units["wei"])
    kwei = int(units["kwei"])
    babbage = int(units["babbage"])
    femtoether = int(units["femtoether"])
    mwei = int(units["mwei"])
    lovelace = int(units["lovelace"])
    picoether = int(units["picoether"])
    gwei = int(units["gwei"])
    shannon = int(units["shannon"])
    nanoether = int(units["nanoether"])
    nano = int(units["nano"])
    szabo = int(units["szabo"])
    microether = int(units["microether"])
    micro = int(units["micro"])
    finney = int(units["finney"])
    milliether = int(units["milliether"])
    milli = int(units["milli"])
    ether = int(units["ether"])
    kether = int(units["kether"])
    grand = int(units["grand"])
    mether = int(units["mether"])
    gether = int(units["gether"])
    tether = int(units["tether"])
# Valid wei amounts: non-negative and bounded by a uint256.
MIN_WEI = 0
MAX_WEI = 2**256 - 1
def from_wei(number: int, unit: str) -> Union[int, decimal.Decimal]:
    """
    Takes a number of wei and converts it to any other ether unit.

    Returns the int ``0`` for zero input, otherwise a ``decimal.Decimal``.
    Raises ValueError for an unknown unit or an out-of-range wei amount.
    """
    unit_key = unit.lower()
    if unit_key not in units:
        raise ValueError(f"Unknown unit. Must be one of {'/'.join(units.keys())}")
    if number == 0:
        return 0
    if not MIN_WEI <= number <= MAX_WEI:
        raise ValueError("value must be between 1 and 2**256 - 1")
    # High precision so the division never rounds a uint256-sized value.
    with localcontext() as ctx:
        ctx.prec = 999
        return decimal.Decimal(value=number, context=ctx) / units[unit_key]
def to_wei(number: Union[int, float, str, decimal.Decimal], unit: str) -> int:
    """
    Takes a number of a unit and converts it to wei.

    Accepts int, float, numeric str, or Decimal; raises ValueError for an
    unknown unit or a result outside the uint256 range, TypeError for other
    input types.
    """
    if unit.lower() not in units:
        raise ValueError(f"Unknown unit. Must be one of {'/'.join(units.keys())}")
    if is_integer(number) or is_string(number):
        d_number = decimal.Decimal(value=number)
    elif isinstance(number, float):
        # Route floats through str() so e.g. 0.1 keeps its short literal form
        # rather than its exact binary expansion.
        d_number = decimal.Decimal(value=str(number))
    elif isinstance(number, decimal.Decimal):
        d_number = number
    else:
        raise TypeError("Unsupported type. Must be one of integer, float, or string")
    s_number = str(number)
    unit_value = units[unit.lower()]
    if d_number == decimal.Decimal(0):
        return 0
    if d_number < 1 and "." in s_number:
        # For sub-1 fractional inputs, rescale number and unit by 10**digits
        # so the significant digits survive the restricted-precision context.
        with localcontext() as ctx:
            multiplier = len(s_number) - s_number.index(".") - 1
            ctx.prec = multiplier
            d_number = decimal.Decimal(value=number, context=ctx) * 10**multiplier
        unit_value /= 10**multiplier
    with localcontext() as ctx:
        ctx.prec = 999
        result_value = decimal.Decimal(value=d_number, context=ctx) * unit_value
    if result_value < MIN_WEI or result_value > MAX_WEI:
        raise ValueError("Resulting wei value must be between 1 and 2**256 - 1")
    return int(result_value)

View File

@@ -0,0 +1,269 @@
from typing import (
Any,
Callable,
Dict,
Generator,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
overload,
)
from .. import (
ExtendedDebugLogger,
HasExtendedDebugLogger,
HasExtendedDebugLoggerMeta,
HasLogger,
HasLoggerMeta,
ValidationError,
add_0x_prefix,
apply_formatter_at_index,
apply_formatter_if as non_curried_apply_formatter_if,
apply_formatter_to_array,
apply_formatters_to_dict as non_curried_apply_formatters_to_dict,
apply_formatters_to_sequence,
apply_key_map,
apply_one_of_formatters as non_curried_apply_one_of_formatters,
apply_to_return_value,
big_endian_to_int,
clamp,
combine_argument_formatters,
combomethod,
decode_hex,
denoms,
encode_hex,
# event_abi_to_log_topic,
# event_signature_to_log_topic,
flatten_return,
from_wei,
# function_abi_to_4byte_selector,
# function_signature_to_4byte_selector,
get_extended_debug_logger,
get_logger,
hexstr_if_str as non_curried_hexstr_if_str,
humanize_bytes,
humanize_hash,
humanize_integer_sequence,
humanize_ipfs_uri,
humanize_seconds,
humanize_wei,
import_string,
int_to_big_endian,
is_0x_prefixed,
is_address,
is_binary_address,
is_boolean,
is_bytes,
is_canonical_address,
is_checksum_address,
is_checksum_formatted_address,
is_dict,
is_hex,
is_hex_address,
is_hexstr,
is_integer,
is_list,
is_list_like,
is_normalized_address,
is_null,
is_number,
is_same_address,
is_string,
is_text,
is_tuple,
# keccak,
remove_0x_prefix,
replace_exceptions,
reversed_return,
setup_DEBUG2_logging,
sort_return,
text_if_str as non_curried_text_if_str,
to_bytes,
to_canonical_address,
to_checksum_address,
to_dict,
to_hex,
to_int,
to_list,
to_normalized_address,
to_ordered_dict,
to_set,
to_text,
to_tuple,
to_wei,
)
from ..toolz import (
curry,
)
TReturn = TypeVar("TReturn")
TValue = TypeVar("TValue")
# The @overload stubs below only describe the curried call shapes for mypy;
# every stubbed name is replaced by a real curried implementation at the
# bottom of this module (see the curry(...) re-bindings).
@overload
def apply_formatter_if(
    condition: Callable[..., bool]
) -> Callable[[Callable[..., TReturn]], Callable[[TValue], Union[TReturn, TValue]]]:
    pass
@overload
def apply_formatter_if(
    condition: Callable[..., bool], formatter: Callable[..., TReturn]
) -> Callable[[TValue], Union[TReturn, TValue]]:
    pass
@overload
def apply_formatter_if(
    condition: Callable[..., bool], formatter: Callable[..., TReturn], value: TValue
) -> Union[TReturn, TValue]:
    pass
# This is just a stub to appease mypy, it gets overwritten later
def apply_formatter_if(  # type: ignore
    condition: Callable[..., bool],
    formatter: Optional[Callable[..., TReturn]] = None,
    value: Optional[TValue] = None,
) -> Union[
    Callable[[Callable[..., TReturn]], Callable[[TValue], Union[TReturn, TValue]]],
    Callable[[TValue], Union[TReturn, TValue]],
    TReturn,
    TValue,
]:
    pass
@overload
def apply_one_of_formatters(
    formatter_condition_pairs: Sequence[
        Tuple[Callable[..., bool], Callable[..., TReturn]]
    ]
) -> Callable[[TValue], TReturn]:
    ...
@overload
def apply_one_of_formatters(
    formatter_condition_pairs: Sequence[
        Tuple[Callable[..., bool], Callable[..., TReturn]]
    ],
    value: TValue,
) -> TReturn:
    ...
# This is just a stub to appease mypy, it gets overwritten later
def apply_one_of_formatters(  # type: ignore
    formatter_condition_pairs: Sequence[
        Tuple[Callable[..., bool], Callable[..., TReturn]]
    ],
    value: Optional[TValue] = None,
) -> TReturn:
    ...
@overload
def hexstr_if_str(
    to_type: Callable[..., TReturn]
) -> Callable[[Union[bytes, int, str]], TReturn]:
    ...
@overload
def hexstr_if_str(
    to_type: Callable[..., TReturn], to_format: Union[bytes, int, str]
) -> TReturn:
    ...
# This is just a stub to appease mypy, it gets overwritten later
def hexstr_if_str(  # type: ignore
    to_type: Callable[..., TReturn], to_format: Optional[Union[bytes, int, str]] = None
) -> TReturn:
    ...
@overload
def text_if_str(
    to_type: Callable[..., TReturn]
) -> Callable[[Union[bytes, int, str]], TReturn]:
    ...
@overload
def text_if_str(
    to_type: Callable[..., TReturn], text_or_primitive: Union[bytes, int, str]
) -> TReturn:
    ...
# This is just a stub to appease mypy, it gets overwritten later
def text_if_str(  # type: ignore
    to_type: Callable[..., TReturn],
    text_or_primitive: Optional[Union[bytes, int, str]] = None,
) -> TReturn:
    ...
@overload
def apply_formatters_to_dict(
    formatters: Dict[Any, Any]
) -> Callable[[Dict[Any, Any]], TReturn]:
    ...
@overload
def apply_formatters_to_dict(
    formatters: Dict[Any, Any], value: Dict[Any, Any]
) -> Dict[Any, Any]:
    ...
# This is just a stub to appease mypy, it gets overwritten later
def apply_formatters_to_dict(  # type: ignore
    formatters: Dict[Any, Any], value: Optional[Dict[Any, Any]] = None
) -> Dict[Any, Any]:
    ...
# Re-bind the public helpers as curried versions: calling with fewer
# arguments returns a partial. The `non_curried_*` aliases preserve the
# originals so the stubs above can be overwritten cleanly.
apply_formatter_at_index = curry(apply_formatter_at_index)
apply_formatter_if = curry(non_curried_apply_formatter_if)  # noqa: F811
apply_formatter_to_array = curry(apply_formatter_to_array)
apply_formatters_to_dict = curry(non_curried_apply_formatters_to_dict)  # noqa: F811
apply_formatters_to_sequence = curry(apply_formatters_to_sequence)
apply_key_map = curry(apply_key_map)
apply_one_of_formatters = curry(non_curried_apply_one_of_formatters)  # noqa: F811
from_wei = curry(from_wei)
get_logger = curry(get_logger)
hexstr_if_str = curry(non_curried_hexstr_if_str)  # noqa: F811
is_same_address = curry(is_same_address)
text_if_str = curry(non_curried_text_if_str)  # noqa: F811
to_wei = curry(to_wei)
clamp = curry(clamp)
# Delete any methods and classes that are not intended to be importable from
# `eth_utils.curried`. We do this approach instead of __all__ because this approach
# actually prevents importing the wrong thing, while __all__ only affects
# `from eth_utils.curried import *`
# (Typing helpers, `curry` itself, and the non-curried originals all go.)
del Any
del Callable
del Dict
del Generator
del Optional
del Sequence
del TReturn
del TValue
del Tuple
del TypeVar
del Union
del curry
del non_curried_apply_formatter_if
del non_curried_apply_one_of_formatters
del non_curried_apply_formatters_to_dict
del non_curried_hexstr_if_str
del non_curried_text_if_str
del overload

View File

@@ -0,0 +1,20 @@
import platform
import subprocess
import sys
def pip_freeze() -> str:
    """Return the active environment's installed packages via ``pip freeze``.

    Uses ``sys.executable`` rather than the bare name ``python`` so the
    report reflects the interpreter actually running this code, even when
    ``python`` on PATH points at a different (or missing) interpreter.
    """
    result = subprocess.run(
        [sys.executable, "-m", "pip", "freeze"], stdout=subprocess.PIPE
    )
    return f"python -m pip freeze result:\n{result.stdout.decode()}"
def python_version() -> str:
    """Return a human-readable summary of the running Python version."""
    return "Python version:\n" + sys.version
def platform_info() -> str:
    """Return a one-line description of the host operating system."""
    return "Operating System: " + platform.platform()
def get_environment_summary() -> str:
    """Combine Python version, OS info, and installed packages into one report."""
    sections = [python_version(), platform_info(), pip_freeze()]
    return "\n\n".join(sections)

View File

@@ -0,0 +1,132 @@
import functools
import itertools
from typing import (
Any,
Callable,
Dict,
Optional,
Type,
TypeVar,
)
from .types import (
is_text,
)
T = TypeVar("T")
class combomethod:
def __init__(self, method: Callable[..., Any]) -> None:
self.method = method
def __get__(
self, obj: Optional[T] = None, objtype: Optional[Type[T]] = None
) -> Callable[..., Any]:
@functools.wraps(self.method)
def _wrapper(*args: Any, **kwargs: Any) -> Any:
if obj is not None:
return self.method(obj, *args, **kwargs)
else:
return self.method(objtype, *args, **kwargs)
return _wrapper
def _has_one_val(*args: T, **kwargs: T) -> bool:
vals = itertools.chain(args, kwargs.values())
not_nones = list(filter(lambda val: val is not None, vals))
return len(not_nones) == 1
def _assert_one_val(*args: T, **kwargs: T) -> None:
if not _has_one_val(*args, **kwargs):
raise TypeError(
"Exactly one of the passed values can be specified. "
f"Instead, values were: {repr(args)}, {repr(kwargs)}"
)
def _hexstr_or_text_kwarg_is_text_type(**kwargs: T) -> bool:
    # Exactly one of 'hexstr'/'text' is present here (the callers run
    # _assert_one_val first); check that its value is actually text.
    value = kwargs["hexstr"] if "hexstr" in kwargs else kwargs["text"]
    return is_text(value)
def _assert_hexstr_or_text_kwarg_is_text_type(**kwargs: T) -> None:
    # Raise TypeError when a hexstr/text keyword carries a non-text value.
    if not _hexstr_or_text_kwarg_is_text_type(**kwargs):
        raise TypeError(
            "Arguments passed as hexstr or text must be of text type. "
            f"Instead, value was: {(repr(next(iter(list(kwargs.values())))))}"
        )
def _validate_supported_kwarg(kwargs: Any) -> None:
if next(iter(kwargs)) not in ["primitive", "hexstr", "text"]:
raise TypeError(
"Kwarg must be 'primitive', 'hexstr', or 'text'. "
f"Instead, kwarg was: {repr((next(iter(kwargs))))}"
)
def validate_conversion_arguments(to_wrap: Callable[..., T]) -> Callable[..., T]:
    """
    Validates arguments for conversion functions.
    - Only a single argument is present
    - Kwarg must be 'primitive' 'hexstr' or 'text'
    - If it is 'hexstr' or 'text' that it is a text type
    """
    @functools.wraps(to_wrap)
    def wrapper(*args: Any, **kwargs: Any) -> T:
        _assert_one_val(*args, **kwargs)
        if kwargs:
            _validate_supported_kwarg(kwargs)
        # Positional values are the 'primitive' argument and may be any type;
        # only the hexstr/text keywords are required to be text.
        if len(args) == 0 and "primitive" not in kwargs:
            _assert_hexstr_or_text_kwarg_is_text_type(**kwargs)
        return to_wrap(*args, **kwargs)
    return wrapper
def return_arg_type(at_position: int) -> Callable[..., Callable[..., T]]:
    """
    Wrap the return value with the result of `type(args[at_position])`.

    E.g. with ``at_position=0``: list in -> list out, tuple in -> tuple out.
    """
    def decorator(to_wrap: Callable[..., Any]) -> Callable[..., T]:
        @functools.wraps(to_wrap)
        def wrapper(*args: Any, **kwargs: Any) -> T:  # type: ignore
            raw_result = to_wrap(*args, **kwargs)
            target_type = type(args[at_position])
            return target_type(raw_result)  # type: ignore
        return wrapper
    return decorator
def replace_exceptions(
    old_to_new_exceptions: Dict[Type[BaseException], Type[BaseException]]
) -> Callable[[Callable[..., T]], Callable[..., T]]:
    """
    Replaces old exceptions with new exceptions to be raised in their place.

    The replacement is looked up by the exact type of the caught exception;
    a caught subclass of a mapped type with no mapping of its own surfaces
    as a TypeError.
    """
    catchable = tuple(old_to_new_exceptions.keys())
    def decorator(to_wrap: Callable[..., T]) -> Callable[..., T]:
        @functools.wraps(to_wrap)
        def wrapped(*args: Any, **kwargs: Any) -> T:
            try:
                return to_wrap(*args, **kwargs)
            except catchable as err:
                replacement = old_to_new_exceptions.get(type(err))
                if replacement is None:
                    raise TypeError(
                        f"could not look up new exception to use for {repr(err)}"
                    ) from err
                raise replacement(err) from err
        return wrapped
    return decorator

View File

@@ -0,0 +1,6 @@
def int_to_big_endian(value: int) -> bytes:
    """Serialize a non-negative int to its shortest big-endian byte string.

    Zero encodes as a single null byte rather than the empty string.
    """
    width = max(1, (value.bit_length() + 7) // 8)
    return value.to_bytes(width, "big")
def big_endian_to_int(value: bytes) -> int:
    """Interpret *value* as an unsigned big-endian integer (``b""`` -> 0)."""
    return int.from_bytes(value, byteorder="big")

View File

@@ -0,0 +1,4 @@
class ValidationError(Exception):
    """
    Raised when something does not pass a validation check.

    Single generic failure type so callers can catch one exception for any
    invalid input detected by these utilities.
    """

View File

@@ -0,0 +1,75 @@
import collections
import functools
import itertools
from typing import ( # noqa: F401
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Set,
Tuple,
TypeVar,
Union,
)
from .toolz import (
compose as _compose,
)
T = TypeVar("T")


def identity(value: T) -> T:
    """Return *value* unchanged (the identity function)."""
    return value


TGIn = TypeVar("TGIn")
TGOut = TypeVar("TGOut")
TFOut = TypeVar("TFOut")


def combine(
    f: Callable[[TGOut], TFOut], g: Callable[[TGIn], TGOut]
) -> Callable[[TGIn], TFOut]:
    """Return the composition ``f . g``, i.e. ``x -> f(g(x))``."""
    def composed(x: TGIn) -> TFOut:
        return f(g(x))
    return composed


def apply_to_return_value(
    callback: Callable[..., T]
) -> Callable[..., Callable[..., T]]:
    """Decorator factory: pass the wrapped function's return value through
    *callback* before handing it to the caller."""
    def outer(fn: Callable[..., T]) -> Callable[..., T]:
        # *args/**kwargs deliberately left unannotated: annotating them
        # segfaulted the PyPy builds upstream, hence the type: ignore.
        @functools.wraps(fn)
        def inner(*args, **kwargs) -> T:  # type: ignore
            return callback(fn(*args, **kwargs))
        return inner
    return outer
TVal = TypeVar("TVal")
TKey = TypeVar("TKey")
# Ready-made decorators that coerce a generator/iterable-returning function's
# result into a concrete container type.
to_tuple = apply_to_return_value(
    tuple
)  # type: Callable[[Callable[..., Iterable[TVal]]], Callable[..., Tuple[TVal, ...]]] # noqa: E501
to_list = apply_to_return_value(
    list
)  # type: Callable[[Callable[..., Iterable[TVal]]], Callable[..., List[TVal]]] # noqa: E501
to_set = apply_to_return_value(
    set
)  # type: Callable[[Callable[..., Iterable[TVal]]], Callable[..., Set[TVal]]] # noqa: E501
to_dict = apply_to_return_value(
    dict
)  # type: Callable[[Callable[..., Iterable[Union[Mapping[TKey, TVal], Tuple[TKey, TVal]]]]], Callable[..., Dict[TKey, TVal]]] # noqa: E501
to_ordered_dict = apply_to_return_value(
    collections.OrderedDict
)  # type: Callable[[Callable[..., Iterable[Union[Mapping[TKey, TVal], Tuple[TKey, TVal]]]]], Callable[..., collections.OrderedDict[TKey, TVal]]] # noqa: E501
# Composed helpers: sort/flatten/reverse the wrapped function's returned
# iterable and deliver the outcome as a tuple.
sort_return = _compose(to_tuple, apply_to_return_value(sorted))
flatten_return = _compose(
    to_tuple, apply_to_return_value(itertools.chain.from_iterable)
)
reversed_return = _compose(to_tuple, apply_to_return_value(reversed), to_tuple)

Some files were not shown because too many files have changed in this diff Show More