add
16
ccxt/static_dependencies/ethereum/abi/__init__.py
Normal file
@@ -0,0 +1,16 @@
from .abi import (
    decode,
    decode_abi,
    decode_single,
    encode,
    encode_abi,
    encode_single,
    is_encodable,
    is_encodable_type,
)

# This code from: https://github.com/ethereum/eth-abi/tree/v3.0.1
__version__ = 'ccxt'

__all__ = ['decode', 'encode']
Binary file not shown. (9 files)
19
ccxt/static_dependencies/ethereum/abi/abi.py
Normal file
@@ -0,0 +1,19 @@
from .codec import (
    ABICodec,
)
from .registry import (
    registry,
)

default_codec = ABICodec(registry)

encode = default_codec.encode
encode_abi = default_codec.encode_abi  # deprecated
encode_single = default_codec.encode_single  # deprecated

decode = default_codec.decode
decode_abi = default_codec.decode_abi  # deprecated
decode_single = default_codec.decode_single  # deprecated

is_encodable = default_codec.is_encodable
is_encodable_type = default_codec.is_encodable_type
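# A minimal usage sketch of the module-level helpers defined above. It assumes
# the vendored package is importable as `ccxt.static_dependencies.ethereum.abi`
# (upstream eth-abi exposes the same names from `eth_abi`).
from ccxt.static_dependencies.ethereum.abi import decode, encode

encoded = encode(['uint256', 'bool'], [12345, True])
assert len(encoded) == 64  # two static 32-byte words, no dynamic tail
assert decode(['uint256', 'bool'], encoded) == (12345, True)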
152
ccxt/static_dependencies/ethereum/abi/base.py
Normal file
@@ -0,0 +1,152 @@
import functools

from ..typing.abi import (
    TypeStr,
)

from .grammar import (
    BasicType,
    TupleType,
    normalize,
    parse,
)


def parse_type_str(expected_base=None, with_arrlist=False):
    """
    Used by BaseCoder subclasses as a convenience for implementing the
    ``from_type_str`` method required by ``ABIRegistry``. Useful if normalizing
    then parsing a type string with an (optional) expected base is required in
    that method.
    """

    def decorator(old_from_type_str):
        @functools.wraps(old_from_type_str)
        def new_from_type_str(cls, type_str, registry):
            normalized_type_str = normalize(type_str)
            abi_type = parse(normalized_type_str)

            type_str_repr = repr(type_str)
            if type_str != normalized_type_str:
                type_str_repr = "{} (normalized to {})".format(
                    type_str_repr,
                    repr(normalized_type_str),
                )

            if expected_base is not None:
                if not isinstance(abi_type, BasicType):
                    raise ValueError(
                        "Cannot create {} for non-basic type {}".format(
                            cls.__name__,
                            type_str_repr,
                        )
                    )
                if abi_type.base != expected_base:
                    raise ValueError(
                        "Cannot create {} for type {}: expected type with "
                        "base '{}'".format(
                            cls.__name__,
                            type_str_repr,
                            expected_base,
                        )
                    )

            if not with_arrlist and abi_type.arrlist is not None:
                raise ValueError(
                    "Cannot create {} for type {}: expected type with "
                    "no array dimension list".format(
                        cls.__name__,
                        type_str_repr,
                    )
                )
            if with_arrlist and abi_type.arrlist is None:
                raise ValueError(
                    "Cannot create {} for type {}: expected type with "
                    "array dimension list".format(
                        cls.__name__,
                        type_str_repr,
                    )
                )

            # Perform general validation of default solidity types
            abi_type.validate()

            return old_from_type_str(cls, abi_type, registry)

        return classmethod(new_from_type_str)

    return decorator


def parse_tuple_type_str(old_from_type_str):
    """
    Used by BaseCoder subclasses as a convenience for implementing the
    ``from_type_str`` method required by ``ABIRegistry``. Useful if normalizing
    then parsing a tuple type string is required in that method.
    """

    @functools.wraps(old_from_type_str)
    def new_from_type_str(cls, type_str, registry):
        normalized_type_str = normalize(type_str)
        abi_type = parse(normalized_type_str)

        type_str_repr = repr(type_str)
        if type_str != normalized_type_str:
            type_str_repr = "{} (normalized to {})".format(
                type_str_repr,
                repr(normalized_type_str),
            )

        if not isinstance(abi_type, TupleType):
            raise ValueError(
                "Cannot create {} for non-tuple type {}".format(
                    cls.__name__,
                    type_str_repr,
                )
            )

        abi_type.validate()

        return old_from_type_str(cls, abi_type, registry)

    return classmethod(new_from_type_str)


class BaseCoder:
    """
    Base class for all encoder and decoder classes.
    """

    is_dynamic = False

    def __init__(self, **kwargs):
        cls = type(self)

        # Ensure no unrecognized kwargs were given
        for key, value in kwargs.items():
            if not hasattr(cls, key):
                raise AttributeError(
                    "Property {key} not found on {cls_name} class. "
                    "`{cls_name}.__init__` only accepts keyword arguments which are "
                    "present on the {cls_name} class.".format(
                        key=key,
                        cls_name=cls.__name__,
                    )
                )
            setattr(self, key, value)

        # Validate given combination of kwargs
        self.validate()

    def validate(self):
        pass

    @classmethod
    def from_type_str(
        cls, type_str: TypeStr, registry
    ) -> "BaseCoder":  # pragma: no cover
        """
        Used by :any:`ABIRegistry` to get an appropriate encoder or decoder
        instance for the given type string and type registry.
        """
        raise NotImplementedError("Must implement `from_type_str`")
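# Sketch of the pattern `parse_type_str` enables: a hypothetical coder subclass
# receives an already-normalized, parsed and validated `abi_type` instead of a
# raw type string (the built-in encoders and decoders in this package use the
# same idiom).
class ExampleUIntCoder(BaseCoder):
    value_bit_size = None  # hypothetical attribute, for illustration only

    @parse_type_str("uint")
    def from_type_str(cls, abi_type, registry):
        # `abi_type` is a BasicType with base 'uint'; only coder-specific
        # configuration remains to be done here.
        return cls(value_bit_size=abi_type.sub)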
217
ccxt/static_dependencies/ethereum/abi/codec.py
Normal file
@@ -0,0 +1,217 @@
from typing import (
    Any,
    Iterable,
    Tuple,
)
import warnings

from ..typing.abi import (
    Decodable,
    TypeStr,
)
from ..utils import (
    is_bytes,
)

from .decoding import (
    ContextFramesBytesIO,
    TupleDecoder,
)
from .encoding import (
    TupleEncoder,
)
from .exceptions import (
    EncodingError,
)
from .registry import (
    ABIRegistry,
)


class BaseABICoder:
    """
    Base class for porcelain coding APIs. These are classes which wrap
    instances of :class:`~.registry.ABIRegistry` to provide last-mile
    coding functionality.
    """

    def __init__(self, registry: ABIRegistry):
        """
        Constructor.

        :param registry: The registry providing the encoders to be used when
            encoding values.
        """
        self._registry = registry


class ABIEncoder(BaseABICoder):
    """
    Wraps a registry to provide last-mile encoding functionality.
    """

    def encode_single(self, typ: TypeStr, arg: Any) -> bytes:
        """
        Encodes the python value ``arg`` as a binary value of the ABI type
        ``typ``.

        :param typ: The string representation of the ABI type that will be used
            for encoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``,
            etc.
        :param arg: The python value to be encoded.

        :returns: The binary representation of the python value ``arg`` as a
            value of the ABI type ``typ``.
        """
        warnings.warn(
            "abi.encode_single() and abi.encode_single_packed() are deprecated "
            "and will be removed in version 4.0.0 in favor of abi.encode() and "
            "abi.encode_packed(), respectively",
            category=DeprecationWarning,
        )

        encoder = self._registry.get_encoder(typ)

        return encoder(arg)

    def encode_abi(self, types: Iterable[TypeStr], args: Iterable[Any]) -> bytes:
        """
        Encodes the python values in ``args`` as a sequence of binary values of
        the ABI types in ``types`` via the head-tail mechanism.

        :param types: An iterable of string representations of the ABI types
            that will be used for encoding e.g. ``('uint256', 'bytes[]',
            '(int,int)')``
        :param args: An iterable of python values to be encoded.

        :returns: The head-tail encoded binary representation of the python
            values in ``args`` as values of the ABI types in ``types``.
        """
        warnings.warn(
            "abi.encode_abi() and abi.encode_abi_packed() are deprecated and will be "
            "removed in version 4.0.0 in favor of abi.encode() and "
            "abi.encode_packed(), respectively",
            category=DeprecationWarning,
        )
        return self.encode(types, args)

    def encode(self, types, args):
        encoders = [self._registry.get_encoder(type_str) for type_str in types]

        encoder = TupleEncoder(encoders=encoders)

        return encoder(args)

    def is_encodable(self, typ: TypeStr, arg: Any) -> bool:
        """
        Determines if the python value ``arg`` is encodable as a value of the
        ABI type ``typ``.

        :param typ: A string representation for the ABI type against which the
            python value ``arg`` will be checked e.g. ``'uint256'``,
            ``'bytes[]'``, ``'(int,int)'``, etc.
        :param arg: The python value whose encodability should be checked.

        :returns: ``True`` if ``arg`` is encodable as a value of the ABI type
            ``typ``. Otherwise, ``False``.
        """
        encoder = self._registry.get_encoder(typ)

        try:
            encoder.validate_value(arg)
        except EncodingError:
            return False
        except AttributeError:
            try:
                encoder(arg)
            except EncodingError:
                return False

        return True

    def is_encodable_type(self, typ: TypeStr) -> bool:
        """
        Returns ``True`` if values for the ABI type ``typ`` can be encoded by
        this codec.

        :param typ: A string representation for the ABI type that will be
            checked for encodability e.g. ``'uint256'``, ``'bytes[]'``,
            ``'(int,int)'``, etc.

        :returns: ``True`` if values for ``typ`` can be encoded by this codec.
            Otherwise, ``False``.
        """
        return self._registry.has_encoder(typ)


class ABIDecoder(BaseABICoder):
    """
    Wraps a registry to provide last-mile decoding functionality.
    """

    stream_class = ContextFramesBytesIO

    def decode_single(self, typ: TypeStr, data: Decodable) -> Any:
        """
        Decodes the binary value ``data`` of the ABI type ``typ`` into its
        equivalent python value.

        :param typ: The string representation of the ABI type that will be used for
            decoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``, etc.
        :param data: The binary value to be decoded.

        :returns: The equivalent python value of the ABI value represented in
            ``data``.
        """
        warnings.warn(
            "abi.decode_single() is deprecated and will be removed in version 4.0.0 "
            "in favor of abi.decode()",
            category=DeprecationWarning,
        )

        if not is_bytes(data):
            raise TypeError(
                "The `data` value must be of bytes type. Got {0}".format(type(data))
            )

        decoder = self._registry.get_decoder(typ)
        stream = self.stream_class(data)

        return decoder(stream)

    def decode_abi(self, types: Iterable[TypeStr], data: Decodable) -> Tuple[Any, ...]:
        """
        Decodes the binary value ``data`` as a sequence of values of the ABI types
        in ``types`` via the head-tail mechanism into a tuple of equivalent python
        values.

        :param types: An iterable of string representations of the ABI types that
            will be used for decoding e.g. ``('uint256', 'bytes[]', '(int,int)')``
        :param data: The binary value to be decoded.

        :returns: A tuple of equivalent python values for the ABI values
            represented in ``data``.
        """
        warnings.warn(
            "abi.decode_abi() is deprecated and will be removed in version 4.0.0 in "
            "favor of abi.decode()",
            category=DeprecationWarning,
        )
        return self.decode(types, data)

    def decode(self, types, data):
        if not is_bytes(data):
            raise TypeError(
                f"The `data` value must be of bytes type. Got {type(data)}"
            )

        decoders = [self._registry.get_decoder(type_str) for type_str in types]

        decoder = TupleDecoder(decoders=decoders)
        stream = self.stream_class(data)

        return decoder(stream)


class ABICodec(ABIEncoder, ABIDecoder):
    pass
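# Illustrative sketch of the porcelain API above, wired to the default
# `registry` exactly as abi.py does (the registry module is part of this
# package but sits outside this file).
from .registry import registry

codec = ABICodec(registry)
if codec.is_encodable('(int256,int256)', (1, -1)):
    payload = codec.encode(['(int256,int256)'], [(1, -1)])
    assert codec.decode(['(int256,int256)'], payload) == ((1, -1),)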
3
ccxt/static_dependencies/ethereum/abi/constants.py
Normal file
@@ -0,0 +1,3 @@
TT256 = 2**256
TT256M1 = 2**256 - 1
TT255 = 2**255
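# Quick sanity check of the relationships between the constants above
# (illustrative only): TT256M1 acts as an all-ones 256-bit mask.
assert TT256M1 == TT256 - 1
assert (-1) & TT256M1 == TT256M1  # wrapping -1 into the unsigned word range
assert TT255 * 2 == TT256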
565
ccxt/static_dependencies/ethereum/abi/decoding.py
Normal file
@@ -0,0 +1,565 @@
import abc
import decimal
import io
from typing import (
    Any,
)

from ..utils import (
    big_endian_to_int,
    to_normalized_address,
    to_tuple,
)

from .base import (
    BaseCoder,
    parse_tuple_type_str,
    parse_type_str,
)
from .exceptions import (
    DecodingError,
    InsufficientDataBytes,
    NonEmptyPaddingBytes,
)
from .utils.numeric import (
    TEN,
    abi_decimal_context,
    ceil32,
)


class ContextFramesBytesIO(io.BytesIO):
    """
    A byte stream which can track a series of contextual frames in a stack. This
    data structure is necessary to perform nested decodings using the
    :py:class:``HeadTailDecoder`` since offsets present in head sections are
    relative only to a particular encoded object. These offsets can only be
    used to locate a position in a decoding stream if they are paired with a
    contextual offset that establishes the position of the object in which they
    are found.

    For example, consider the encoding of a value for the following type::

        type: (int,(int,int[]))
        value: (1,(2,[3,3]))

    There are two tuples in this type: one inner and one outer. The inner tuple
    type contains a dynamic type ``int[]`` and, therefore, is itself dynamic.
    This means that its value encoding will be placed in the tail section of the
    outer tuple's encoding. Furthermore, the inner tuple's encoding will,
    itself, contain a tail section with the encoding for ``[3,3]``. All
    together, the encoded value of ``(1,(2,[3,3]))`` would look like this (the
    data values are normally 32 bytes wide but have been truncated to remove the
    redundant zeros at the beginnings of their encodings)::

                               offset data
             --------------------------
          ^                        0 0x01
          |                       32 0x40 <-- Offset of object A in global frame (64)
     -----|--------------------
     Global frame ^               64 0x02 <-- Beginning of object A (64 w/offset 0 = 64)
          |       |               96 0x40 <-- Offset of object B in frame of object A (64)
     -----|-Object A's frame---
          |       |              128 0x02 <-- Beginning of object B (64 w/offset 64 = 128)
          |       |              160 0x03
          v       v              192 0x03
             --------------------------

    Note that the offset of object B is encoded as 64 which only specifies the
    beginning of its encoded value relative to the beginning of object A's
    encoding. Globally, object B is located at offset 128. In order to make
    sense out of object B's offset, it needs to be positioned in the context of
    its enclosing object's frame (object A).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self._frames = []
        self._total_offset = 0

    def seek_in_frame(self, pos, *args, **kwargs):
        """
        Seeks relative to the total offset of the current contextual frames.
        """
        self.seek(self._total_offset + pos, *args, **kwargs)

    def push_frame(self, offset):
        """
        Pushes a new contextual frame onto the stack with the given offset and a
        return position at the current cursor position then seeks to the new
        total offset.
        """
        self._frames.append((offset, self.tell()))
        self._total_offset += offset

        self.seek_in_frame(0)

    def pop_frame(self):
        """
        Pops the current contextual frame off of the stack and returns the
        cursor to the frame's return position.
        """
        try:
            offset, return_pos = self._frames.pop()
        except IndexError:
            raise IndexError("no frames to pop")
        self._total_offset -= offset

        self.seek(return_pos)


class BaseDecoder(BaseCoder, metaclass=abc.ABCMeta):
    """
    Base class for all decoder classes. Subclass this if you want to define a
    custom decoder class. Subclasses must also implement
    :any:`BaseCoder.from_type_str`.
    """

    @abc.abstractmethod
    def decode(self, stream: ContextFramesBytesIO) -> Any:  # pragma: no cover
        """
        Decodes the given stream of bytes into a python value. Should raise
        :any:`exceptions.DecodingError` if a python value cannot be decoded
        from the given byte stream.
        """
        pass

    def __call__(self, stream: ContextFramesBytesIO) -> Any:
        return self.decode(stream)


class HeadTailDecoder(BaseDecoder):
    is_dynamic = True

    tail_decoder = None

    def validate(self):
        super().validate()

        if self.tail_decoder is None:
            raise ValueError("No `tail_decoder` set")

    def decode(self, stream):
        start_pos = decode_uint_256(stream)

        stream.push_frame(start_pos)
        value = self.tail_decoder(stream)
        stream.pop_frame()

        return value


class TupleDecoder(BaseDecoder):
    decoders = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.decoders = tuple(
            HeadTailDecoder(tail_decoder=d) if getattr(d, "is_dynamic", False) else d
            for d in self.decoders
        )

        self.is_dynamic = any(getattr(d, "is_dynamic", False) for d in self.decoders)

    def validate(self):
        super().validate()

        if self.decoders is None:
            raise ValueError("No `decoders` set")

    @to_tuple
    def decode(self, stream):
        for decoder in self.decoders:
            yield decoder(stream)

    @parse_tuple_type_str
    def from_type_str(cls, abi_type, registry):
        decoders = tuple(
            registry.get_decoder(c.to_type_str()) for c in abi_type.components
        )

        return cls(decoders=decoders)


class SingleDecoder(BaseDecoder):
    decoder_fn = None

    def validate(self):
        super().validate()

        if self.decoder_fn is None:
            raise ValueError("No `decoder_fn` set")

    def validate_padding_bytes(self, value, padding_bytes):
        raise NotImplementedError("Must be implemented by subclasses")

    def decode(self, stream):
        raw_data = self.read_data_from_stream(stream)
        data, padding_bytes = self.split_data_and_padding(raw_data)
        value = self.decoder_fn(data)
        self.validate_padding_bytes(value, padding_bytes)

        return value

    def read_data_from_stream(self, stream):
        raise NotImplementedError("Must be implemented by subclasses")

    def split_data_and_padding(self, raw_data):
        return raw_data, b""


class BaseArrayDecoder(BaseDecoder):
    item_decoder = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Use a head-tail decoder to decode dynamic elements
        if self.item_decoder.is_dynamic:
            self.item_decoder = HeadTailDecoder(
                tail_decoder=self.item_decoder,
            )

    def validate(self):
        super().validate()

        if self.item_decoder is None:
            raise ValueError("No `item_decoder` set")

    @parse_type_str(with_arrlist=True)
    def from_type_str(cls, abi_type, registry):
        item_decoder = registry.get_decoder(abi_type.item_type.to_type_str())

        array_spec = abi_type.arrlist[-1]
        if len(array_spec) == 1:
            # If array dimension is fixed
            return SizedArrayDecoder(
                array_size=array_spec[0],
                item_decoder=item_decoder,
            )
        else:
            # If array dimension is dynamic
            return DynamicArrayDecoder(item_decoder=item_decoder)


class SizedArrayDecoder(BaseArrayDecoder):
    array_size = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.is_dynamic = self.item_decoder.is_dynamic

    @to_tuple
    def decode(self, stream):
        for _ in range(self.array_size):
            yield self.item_decoder(stream)


class DynamicArrayDecoder(BaseArrayDecoder):
    # Dynamic arrays are always dynamic, regardless of their elements
    is_dynamic = True

    @to_tuple
    def decode(self, stream):
        array_size = decode_uint_256(stream)
        stream.push_frame(32)
        for _ in range(array_size):
            yield self.item_decoder(stream)
        stream.pop_frame()


class FixedByteSizeDecoder(SingleDecoder):
    decoder_fn = None
    value_bit_size = None
    data_byte_size = None
    is_big_endian = None

    def validate(self):
        super().validate()

        if self.value_bit_size is None:
            raise ValueError("`value_bit_size` may not be None")
        if self.data_byte_size is None:
            raise ValueError("`data_byte_size` may not be None")
        if self.decoder_fn is None:
            raise ValueError("`decoder_fn` may not be None")
        if self.is_big_endian is None:
            raise ValueError("`is_big_endian` may not be None")

        if self.value_bit_size % 8 != 0:
            raise ValueError(
                "Invalid value bit size: {0}. Must be a multiple of 8".format(
                    self.value_bit_size,
                )
            )

        if self.value_bit_size > self.data_byte_size * 8:
            raise ValueError("Value byte size exceeds data size")

    def read_data_from_stream(self, stream):
        data = stream.read(self.data_byte_size)

        if len(data) != self.data_byte_size:
            raise InsufficientDataBytes(
                "Tried to read {0} bytes. Only got {1} bytes".format(
                    self.data_byte_size,
                    len(data),
                )
            )

        return data

    def split_data_and_padding(self, raw_data):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size

        if self.is_big_endian:
            padding_bytes = raw_data[:padding_size]
            data = raw_data[padding_size:]
        else:
            data = raw_data[:value_byte_size]
            padding_bytes = raw_data[value_byte_size:]

        return data, padding_bytes

    def validate_padding_bytes(self, value, padding_bytes):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size

        if padding_bytes != b"\x00" * padding_size:
            raise NonEmptyPaddingBytes(
                "Padding bytes were not empty: {0}".format(repr(padding_bytes))
            )

    def _get_value_byte_size(self):
        value_byte_size = self.value_bit_size // 8
        return value_byte_size


class Fixed32ByteSizeDecoder(FixedByteSizeDecoder):
    data_byte_size = 32


class BooleanDecoder(Fixed32ByteSizeDecoder):
    value_bit_size = 8
    is_big_endian = True

    @staticmethod
    def decoder_fn(data):
        if data == b"\x00":
            return False
        elif data == b"\x01":
            return True
        else:
            raise NonEmptyPaddingBytes(
                "Boolean must be either 0x0 or 0x1. Got: {0}".format(repr(data))
            )

    @parse_type_str("bool")
    def from_type_str(cls, abi_type, registry):
        return cls()


class AddressDecoder(Fixed32ByteSizeDecoder):
    value_bit_size = 20 * 8
    is_big_endian = True
    decoder_fn = staticmethod(to_normalized_address)

    @parse_type_str("address")
    def from_type_str(cls, abi_type, registry):
        return cls()


#
# Unsigned Integer Decoders
#
class UnsignedIntegerDecoder(Fixed32ByteSizeDecoder):
    decoder_fn = staticmethod(big_endian_to_int)
    is_big_endian = True

    @parse_type_str("uint")
    def from_type_str(cls, abi_type, registry):
        return cls(value_bit_size=abi_type.sub)


decode_uint_256 = UnsignedIntegerDecoder(value_bit_size=256)


#
# Signed Integer Decoders
#
class SignedIntegerDecoder(Fixed32ByteSizeDecoder):
    is_big_endian = True

    def decoder_fn(self, data):
        value = big_endian_to_int(data)
        if value >= 2 ** (self.value_bit_size - 1):
            return value - 2**self.value_bit_size
        else:
            return value

    def validate_padding_bytes(self, value, padding_bytes):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size

        if value >= 0:
            expected_padding_bytes = b"\x00" * padding_size
        else:
            expected_padding_bytes = b"\xff" * padding_size

        if padding_bytes != expected_padding_bytes:
            raise NonEmptyPaddingBytes(
                "Padding bytes were not empty: {0}".format(repr(padding_bytes))
            )

    @parse_type_str("int")
    def from_type_str(cls, abi_type, registry):
        return cls(value_bit_size=abi_type.sub)


#
# Bytes1..32
#
class BytesDecoder(Fixed32ByteSizeDecoder):
    is_big_endian = False

    @staticmethod
    def decoder_fn(data):
        return data

    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        return cls(value_bit_size=abi_type.sub * 8)


class BaseFixedDecoder(Fixed32ByteSizeDecoder):
    frac_places = None
    is_big_endian = True

    def validate(self):
        super().validate()

        if self.frac_places is None:
            raise ValueError("must specify `frac_places`")

        if self.frac_places <= 0 or self.frac_places > 80:
            raise ValueError("`frac_places` must be in range (0, 80]")


class UnsignedFixedDecoder(BaseFixedDecoder):
    def decoder_fn(self, data):
        value = big_endian_to_int(data)

        with decimal.localcontext(abi_decimal_context):
            decimal_value = decimal.Decimal(value) / TEN**self.frac_places

        return decimal_value

    @parse_type_str("ufixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub

        return cls(value_bit_size=value_bit_size, frac_places=frac_places)


class SignedFixedDecoder(BaseFixedDecoder):
    def decoder_fn(self, data):
        value = big_endian_to_int(data)
        if value >= 2 ** (self.value_bit_size - 1):
            signed_value = value - 2**self.value_bit_size
        else:
            signed_value = value

        with decimal.localcontext(abi_decimal_context):
            decimal_value = decimal.Decimal(signed_value) / TEN**self.frac_places

        return decimal_value

    def validate_padding_bytes(self, value, padding_bytes):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size

        if value >= 0:
            expected_padding_bytes = b"\x00" * padding_size
        else:
            expected_padding_bytes = b"\xff" * padding_size

        if padding_bytes != expected_padding_bytes:
            raise NonEmptyPaddingBytes(
                "Padding bytes were not empty: {0}".format(repr(padding_bytes))
            )

    @parse_type_str("fixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub

        return cls(value_bit_size=value_bit_size, frac_places=frac_places)


#
# String and Bytes
#
class ByteStringDecoder(SingleDecoder):
    is_dynamic = True

    @staticmethod
    def decoder_fn(data):
        return data

    @staticmethod
    def read_data_from_stream(stream):
        data_length = decode_uint_256(stream)
        padded_length = ceil32(data_length)

        data = stream.read(padded_length)

        if len(data) < padded_length:
            raise InsufficientDataBytes(
                "Tried to read {0} bytes. Only got {1} bytes".format(
                    padded_length,
                    len(data),
                )
            )

        padding_bytes = data[data_length:]

        if padding_bytes != b"\x00" * (padded_length - data_length):
            raise NonEmptyPaddingBytes(
                "Padding bytes were not empty: {0}".format(repr(padding_bytes))
            )

        return data[:data_length]

    def validate_padding_bytes(self, value, padding_bytes):
        pass

    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        return cls()


class StringDecoder(ByteStringDecoder):
    @parse_type_str("string")
    def from_type_str(cls, abi_type, registry):
        return cls()

    @staticmethod
    def decoder_fn(data):
        try:
            value = data.decode("utf-8")
        except UnicodeDecodeError as e:
            raise DecodingError(
                e.encoding,
                e.object,
                e.start,
                e.end,
                "The returned type for this function is string which is "
                "expected to be a UTF8 encoded string of text. The returned "
                "value could not be decoded as valid UTF8. This is indicative "
                "of a broken application which is using incorrect return types for "
                "binary data.",
            ) from e
        return value
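# Worked example of the frame bookkeeping described in the
# ContextFramesBytesIO docstring, decoding (1, (2, [3, 3])) for the type
# (int256,(int256,int256[])). Illustrative only; it assumes the default
# `registry` (defined outside this file) to build the component decoders.
from .registry import registry

def _word(n):
    # helper for this sketch only: one 32-byte big-endian word
    return n.to_bytes(32, 'big')

encoded = b''.join(_word(n) for n in (
    1,     # static head slot for the outer int256
    64,    # offset of object A (the inner tuple) in the global frame
    2,     # beginning of object A: its static int256 component
    64,    # offset of object B (the int256[] tail) within A's frame
    2,     # beginning of object B: the array length
    3, 3,  # the array elements
))

decoder = TupleDecoder(decoders=(
    registry.get_decoder('int256'),
    registry.get_decoder('(int256,int256[])'),
))
assert decoder(ContextFramesBytesIO(encoded)) == (1, (2, (3, 3)))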
720
ccxt/static_dependencies/ethereum/abi/encoding.py
Normal file
@@ -0,0 +1,720 @@
import abc
import codecs
import decimal
from itertools import (
    accumulate,
)
from typing import (
    Any,
    Optional,
    Type,
)

from ..utils import (
    int_to_big_endian,
    is_address,
    is_boolean,
    is_bytes,
    is_integer,
    is_list_like,
    is_number,
    is_text,
    to_canonical_address,
)

from .base import (
    BaseCoder,
    parse_tuple_type_str,
    parse_type_str,
)
from .exceptions import (
    EncodingTypeError,
    IllegalValue,
    ValueOutOfBounds,
)
from .utils.numeric import (
    TEN,
    abi_decimal_context,
    ceil32,
    compute_signed_fixed_bounds,
    compute_signed_integer_bounds,
    compute_unsigned_fixed_bounds,
    compute_unsigned_integer_bounds,
)
from .utils.padding import (
    fpad,
    zpad,
    zpad_right,
)
from .utils.string import (
    abbr,
)


class BaseEncoder(BaseCoder, metaclass=abc.ABCMeta):
    """
    Base class for all encoder classes. Subclass this if you want to define a
    custom encoder class. Subclasses must also implement
    :any:`BaseCoder.from_type_str`.
    """

    @abc.abstractmethod
    def encode(self, value: Any) -> bytes:  # pragma: no cover
        """
        Encodes the given value as a sequence of bytes. Should raise
        :any:`exceptions.EncodingError` if ``value`` cannot be encoded.
        """
        pass

    @abc.abstractmethod
    def validate_value(self, value: Any) -> None:  # pragma: no cover
        """
        Checks whether or not the given value can be encoded by this encoder.
        If the given value cannot be encoded, must raise
        :any:`exceptions.EncodingError`.
        """
        pass

    @classmethod
    def invalidate_value(
        cls,
        value: Any,
        exc: Type[Exception] = EncodingTypeError,
        msg: Optional[str] = None,
    ) -> None:
        """
        Throws a standard exception for when a value is not encodable by an
        encoder.
        """
        raise exc(
            "Value `{rep}` of type {typ} cannot be encoded by {cls}{msg}".format(
                rep=abbr(value),
                typ=type(value),
                cls=cls.__name__,
                msg="" if msg is None else (": " + msg),
            )
        )

    def __call__(self, value: Any) -> bytes:
        return self.encode(value)


class TupleEncoder(BaseEncoder):
    encoders = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.is_dynamic = any(getattr(e, "is_dynamic", False) for e in self.encoders)

    def validate(self):
        super().validate()

        if self.encoders is None:
            raise ValueError("`encoders` may not be none")

    def validate_value(self, value):
        if not is_list_like(value):
            self.invalidate_value(
                value,
                msg="must be list-like object such as array or tuple",
            )

        if len(value) != len(self.encoders):
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg="value has {} items when {} were expected".format(
                    len(value),
                    len(self.encoders),
                ),
            )

        for item, encoder in zip(value, self.encoders):
            try:
                encoder.validate_value(item)
            except AttributeError:
                encoder(item)

    def encode(self, values):
        self.validate_value(values)

        raw_head_chunks = []
        tail_chunks = []
        for value, encoder in zip(values, self.encoders):
            if getattr(encoder, "is_dynamic", False):
                raw_head_chunks.append(None)
                tail_chunks.append(encoder(value))
            else:
                raw_head_chunks.append(encoder(value))
                tail_chunks.append(b"")

        head_length = sum(32 if item is None else len(item) for item in raw_head_chunks)
        tail_offsets = (0,) + tuple(accumulate(map(len, tail_chunks[:-1])))
        head_chunks = tuple(
            encode_uint_256(head_length + offset) if chunk is None else chunk
            for chunk, offset in zip(raw_head_chunks, tail_offsets)
        )

        encoded_value = b"".join(head_chunks + tuple(tail_chunks))

        return encoded_value

    @parse_tuple_type_str
    def from_type_str(cls, abi_type, registry):
        encoders = tuple(
            registry.get_encoder(c.to_type_str()) for c in abi_type.components
        )

        return cls(encoders=encoders)


class FixedSizeEncoder(BaseEncoder):
    value_bit_size = None
    data_byte_size = None
    encode_fn = None
    type_check_fn = None
    is_big_endian = None

    def validate(self):
        super().validate()

        if self.value_bit_size is None:
            raise ValueError("`value_bit_size` may not be none")
        if self.data_byte_size is None:
            raise ValueError("`data_byte_size` may not be none")
        if self.encode_fn is None:
            raise ValueError("`encode_fn` may not be none")
        if self.is_big_endian is None:
            raise ValueError("`is_big_endian` may not be none")

        if self.value_bit_size % 8 != 0:
            raise ValueError(
                "Invalid value bit size: {0}. Must be a multiple of 8".format(
                    self.value_bit_size,
                )
            )

        if self.value_bit_size > self.data_byte_size * 8:
            raise ValueError("Value byte size exceeds data size")

    def validate_value(self, value):
        raise NotImplementedError("Must be implemented by subclasses")

    def encode(self, value):
        self.validate_value(value)
        base_encoded_value = self.encode_fn(value)

        if self.is_big_endian:
            padded_encoded_value = zpad(base_encoded_value, self.data_byte_size)
        else:
            padded_encoded_value = zpad_right(base_encoded_value, self.data_byte_size)

        return padded_encoded_value


class Fixed32ByteSizeEncoder(FixedSizeEncoder):
    data_byte_size = 32


class BooleanEncoder(Fixed32ByteSizeEncoder):
    value_bit_size = 8
    is_big_endian = True

    @classmethod
    def validate_value(cls, value):
        if not is_boolean(value):
            cls.invalidate_value(value)

    @classmethod
    def encode_fn(cls, value):
        if value is True:
            return b"\x01"
        elif value is False:
            return b"\x00"
        else:
            raise ValueError("Invariant")

    @parse_type_str("bool")
    def from_type_str(cls, abi_type, registry):
        return cls()


class PackedBooleanEncoder(BooleanEncoder):
    data_byte_size = 1


class NumberEncoder(Fixed32ByteSizeEncoder):
    is_big_endian = True
    bounds_fn = None
    illegal_value_fn = None
    type_check_fn = None

    def validate(self):
        super().validate()

        if self.bounds_fn is None:
            raise ValueError("`bounds_fn` cannot be null")
        if self.type_check_fn is None:
            raise ValueError("`type_check_fn` cannot be null")

    def validate_value(self, value):
        if not self.type_check_fn(value):
            self.invalidate_value(value)

        illegal_value = self.illegal_value_fn is not None and self.illegal_value_fn(
            value
        )
        if illegal_value:
            self.invalidate_value(value, exc=IllegalValue)

        lower_bound, upper_bound = self.bounds_fn(self.value_bit_size)
        if value < lower_bound or value > upper_bound:
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg=(
                    "Cannot be encoded in {} bits. Must be bounded "
                    "between [{}, {}].".format(
                        self.value_bit_size,
                        lower_bound,
                        upper_bound,
                    )
                ),
            )


class UnsignedIntegerEncoder(NumberEncoder):
    encode_fn = staticmethod(int_to_big_endian)
    bounds_fn = staticmethod(compute_unsigned_integer_bounds)
    type_check_fn = staticmethod(is_integer)

    @parse_type_str("uint")
    def from_type_str(cls, abi_type, registry):
        return cls(value_bit_size=abi_type.sub)


encode_uint_256 = UnsignedIntegerEncoder(value_bit_size=256, data_byte_size=32)


class PackedUnsignedIntegerEncoder(UnsignedIntegerEncoder):
    @parse_type_str("uint")
    def from_type_str(cls, abi_type, registry):
        return cls(
            value_bit_size=abi_type.sub,
            data_byte_size=abi_type.sub // 8,
        )


class SignedIntegerEncoder(NumberEncoder):
    bounds_fn = staticmethod(compute_signed_integer_bounds)
    type_check_fn = staticmethod(is_integer)

    def encode_fn(self, value):
        return int_to_big_endian(value % (2**self.value_bit_size))

    def encode(self, value):
        self.validate_value(value)
        base_encoded_value = self.encode_fn(value)

        if value >= 0:
            padded_encoded_value = zpad(base_encoded_value, self.data_byte_size)
        else:
            padded_encoded_value = fpad(base_encoded_value, self.data_byte_size)

        return padded_encoded_value

    @parse_type_str("int")
    def from_type_str(cls, abi_type, registry):
        return cls(value_bit_size=abi_type.sub)


class PackedSignedIntegerEncoder(SignedIntegerEncoder):
    @parse_type_str("int")
    def from_type_str(cls, abi_type, registry):
        return cls(
            value_bit_size=abi_type.sub,
            data_byte_size=abi_type.sub // 8,
        )


class BaseFixedEncoder(NumberEncoder):
    frac_places = None

    @staticmethod
    def type_check_fn(value):
        return is_number(value) and not isinstance(value, float)

    @staticmethod
    def illegal_value_fn(value):
        if isinstance(value, decimal.Decimal):
            return value.is_nan() or value.is_infinite()

        return False

    def validate_value(self, value):
        super().validate_value(value)

        with decimal.localcontext(abi_decimal_context):
            residue = value % (TEN**-self.frac_places)

        if residue > 0:
            self.invalidate_value(
                value,
                exc=IllegalValue,
                msg="residue {} outside allowed fractional precision of {}".format(
                    repr(residue),
                    self.frac_places,
                ),
            )

    def validate(self):
        super().validate()

        if self.frac_places is None:
            raise ValueError("must specify `frac_places`")

        if self.frac_places <= 0 or self.frac_places > 80:
            raise ValueError("`frac_places` must be in range (0, 80]")


class UnsignedFixedEncoder(BaseFixedEncoder):
    def bounds_fn(self, value_bit_size):
        return compute_unsigned_fixed_bounds(self.value_bit_size, self.frac_places)

    def encode_fn(self, value):
        with decimal.localcontext(abi_decimal_context):
            scaled_value = value * TEN**self.frac_places
            integer_value = int(scaled_value)

        return int_to_big_endian(integer_value)

    @parse_type_str("ufixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub

        return cls(
            value_bit_size=value_bit_size,
            frac_places=frac_places,
        )


class PackedUnsignedFixedEncoder(UnsignedFixedEncoder):
    @parse_type_str("ufixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub

        return cls(
            value_bit_size=value_bit_size,
            data_byte_size=value_bit_size // 8,
            frac_places=frac_places,
        )


class SignedFixedEncoder(BaseFixedEncoder):
    def bounds_fn(self, value_bit_size):
        return compute_signed_fixed_bounds(self.value_bit_size, self.frac_places)

    def encode_fn(self, value):
        with decimal.localcontext(abi_decimal_context):
            scaled_value = value * TEN**self.frac_places
            integer_value = int(scaled_value)

        unsigned_integer_value = integer_value % (2**self.value_bit_size)

        return int_to_big_endian(unsigned_integer_value)

    def encode(self, value):
        self.validate_value(value)
        base_encoded_value = self.encode_fn(value)

        if value >= 0:
            padded_encoded_value = zpad(base_encoded_value, self.data_byte_size)
        else:
            padded_encoded_value = fpad(base_encoded_value, self.data_byte_size)

        return padded_encoded_value

    @parse_type_str("fixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub

        return cls(
            value_bit_size=value_bit_size,
            frac_places=frac_places,
        )


class PackedSignedFixedEncoder(SignedFixedEncoder):
    @parse_type_str("fixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub

        return cls(
            value_bit_size=value_bit_size,
            data_byte_size=value_bit_size // 8,
            frac_places=frac_places,
        )


class AddressEncoder(Fixed32ByteSizeEncoder):
    value_bit_size = 20 * 8
    encode_fn = staticmethod(to_canonical_address)
    is_big_endian = True

    @classmethod
    def validate_value(cls, value):
        if not is_address(value):
            cls.invalidate_value(value)

    def validate(self):
        super().validate()

        if self.value_bit_size != 20 * 8:
            raise ValueError("Addresses must be 160 bits in length")

    @parse_type_str("address")
    def from_type_str(cls, abi_type, registry):
        return cls()


class PackedAddressEncoder(AddressEncoder):
    data_byte_size = 20


class BytesEncoder(Fixed32ByteSizeEncoder):
    is_big_endian = False

    def validate_value(self, value):
        if not is_bytes(value):
            self.invalidate_value(value)

        byte_size = self.value_bit_size // 8
        if len(value) > byte_size:
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg="exceeds total byte size for bytes{} encoding".format(byte_size),
            )

    @staticmethod
    def encode_fn(value):
        return value

    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        return cls(value_bit_size=abi_type.sub * 8)


class PackedBytesEncoder(BytesEncoder):
    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        return cls(
            value_bit_size=abi_type.sub * 8,
            data_byte_size=abi_type.sub,
        )


class ByteStringEncoder(BaseEncoder):
    is_dynamic = True

    @classmethod
    def validate_value(cls, value):
        if not is_bytes(value):
            cls.invalidate_value(value)

    @classmethod
    def encode(cls, value):
        cls.validate_value(value)

        if not value:
            padded_value = b"\x00" * 32
        else:
            padded_value = zpad_right(value, ceil32(len(value)))

        encoded_size = encode_uint_256(len(value))
        encoded_value = encoded_size + padded_value

        return encoded_value

    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        return cls()


class PackedByteStringEncoder(ByteStringEncoder):
    is_dynamic = False

    @classmethod
    def encode(cls, value):
        cls.validate_value(value)
        return value


class TextStringEncoder(BaseEncoder):
    is_dynamic = True

    @classmethod
    def validate_value(cls, value):
        if not is_text(value):
            cls.invalidate_value(value)

    @classmethod
    def encode(cls, value):
        cls.validate_value(value)

        value_as_bytes = codecs.encode(value, "utf8")

        if not value_as_bytes:
            padded_value = b"\x00" * 32
        else:
            padded_value = zpad_right(value_as_bytes, ceil32(len(value_as_bytes)))

        encoded_size = encode_uint_256(len(value_as_bytes))
        encoded_value = encoded_size + padded_value

        return encoded_value

    @parse_type_str("string")
    def from_type_str(cls, abi_type, registry):
        return cls()


class PackedTextStringEncoder(TextStringEncoder):
    is_dynamic = False

    @classmethod
    def encode(cls, value):
        cls.validate_value(value)
        return codecs.encode(value, "utf8")


class BaseArrayEncoder(BaseEncoder):
    item_encoder = None

    def validate(self):
        super().validate()

        if self.item_encoder is None:
            raise ValueError("`item_encoder` may not be none")

    def validate_value(self, value):
        if not is_list_like(value):
            self.invalidate_value(
                value,
                msg="must be list-like such as array or tuple",
            )

        for item in value:
            self.item_encoder.validate_value(item)

    def encode_elements(self, value):
        self.validate_value(value)

        item_encoder = self.item_encoder
        tail_chunks = tuple(item_encoder(i) for i in value)

        items_are_dynamic = getattr(item_encoder, "is_dynamic", False)
        if not items_are_dynamic:
            return b"".join(tail_chunks)

        head_length = 32 * len(value)
        tail_offsets = (0,) + tuple(accumulate(map(len, tail_chunks[:-1])))
        head_chunks = tuple(
            encode_uint_256(head_length + offset) for offset in tail_offsets
        )
        return b"".join(head_chunks + tail_chunks)

    @parse_type_str(with_arrlist=True)
    def from_type_str(cls, abi_type, registry):
        item_encoder = registry.get_encoder(abi_type.item_type.to_type_str())

        array_spec = abi_type.arrlist[-1]
        if len(array_spec) == 1:
            # If array dimension is fixed
            return SizedArrayEncoder(
                array_size=array_spec[0],
                item_encoder=item_encoder,
            )
        else:
            # If array dimension is dynamic
            return DynamicArrayEncoder(item_encoder=item_encoder)


class PackedArrayEncoder(BaseArrayEncoder):
    array_size = None

    def validate_value(self, value):
        super().validate_value(value)

        if self.array_size is not None and len(value) != self.array_size:
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg="value has {} items when {} were expected".format(
                    len(value),
                    self.array_size,
                ),
            )

    def encode(self, value):
        encoded_elements = self.encode_elements(value)

        return encoded_elements

    @parse_type_str(with_arrlist=True)
    def from_type_str(cls, abi_type, registry):
        item_encoder = registry.get_encoder(abi_type.item_type.to_type_str())

        array_spec = abi_type.arrlist[-1]
        if len(array_spec) == 1:
            return cls(
                array_size=array_spec[0],
                item_encoder=item_encoder,
            )
        else:
            return cls(item_encoder=item_encoder)


class SizedArrayEncoder(BaseArrayEncoder):
    array_size = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.is_dynamic = self.item_encoder.is_dynamic

    def validate(self):
        super().validate()

        if self.array_size is None:
            raise ValueError("`array_size` may not be none")

    def validate_value(self, value):
        super().validate_value(value)

        if len(value) != self.array_size:
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg="value has {} items when {} were expected".format(
                    len(value),
                    self.array_size,
                ),
            )

    def encode(self, value):
        encoded_elements = self.encode_elements(value)

        return encoded_elements


class DynamicArrayEncoder(BaseArrayEncoder):
    is_dynamic = True

    def encode(self, value):
        encoded_size = encode_uint_256(len(value))
        encoded_elements = self.encode_elements(value)
        encoded_value = encoded_size + encoded_elements

        return encoded_value
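# Small sketch of the head-tail layout produced by TupleEncoder above for one
# static and one dynamic component (illustrative; the component encoders come
# from the default registry, which lives outside this file).
from .registry import registry

encoder = TupleEncoder(encoders=(
    registry.get_encoder('uint256'),
    registry.get_encoder('bytes'),
))
out = encoder((7, b'abc'))

assert out[0:32] == (7).to_bytes(32, 'big')     # head: static uint256 value
assert out[32:64] == (64).to_bytes(32, 'big')   # head: offset of the dynamic tail
assert out[64:96] == (3).to_bytes(32, 'big')    # tail: byte string length
assert out[96:128] == b'abc' + b'\x00' * 29     # tail: data padded to 32 bytes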
139
ccxt/static_dependencies/ethereum/abi/exceptions.py
Normal file
@@ -0,0 +1,139 @@
from ...parsimonious import (
    ParseError
)


class EncodingError(Exception):
    """
    Base exception for any error that occurs during encoding.
    """

    pass


class EncodingTypeError(EncodingError):
    """
    Raised when trying to encode a python value whose type is not supported for
    the output ABI type.
    """

    pass


class IllegalValue(EncodingError):
    """
    Raised when trying to encode a python value with the correct type but with
    a value that is not considered legal for the output ABI type.

    Example:

    .. code-block:: python

        fixed128x19_encoder(Decimal('NaN'))  # cannot encode NaN

    """

    pass


class ValueOutOfBounds(IllegalValue):
    """
    Raised when trying to encode a python value with the correct type but with
    a value that appears outside the range of valid values for the output ABI
    type.

    Example:

    .. code-block:: python

        ufixed8x1_encoder(Decimal('25.6'))  # out of bounds

    """

    pass


class DecodingError(Exception):
    """
    Base exception for any error that occurs during decoding.
    """

    pass


class InsufficientDataBytes(DecodingError):
    """
    Raised when there are insufficient data to decode a value for a given ABI
    type.
    """

    pass


class NonEmptyPaddingBytes(DecodingError):
    """
    Raised when the padding bytes of an ABI value are malformed.
    """

    pass


class ParseError(ParseError):
    """
    Raised when an ABI type string cannot be parsed.
    """

    def __str__(self):
        return "Parse error at '{}' (column {}) in type string '{}'".format(
            self.text[self.pos : self.pos + 5],
            self.column(),
            self.text,
        )


class ABITypeError(ValueError):
    """
    Raised when a parsed ABI type has inconsistent properties; for example,
    when trying to parse the type string ``'uint7'`` (which has a bit-width
    that is not congruent with zero modulo eight).
    """

    pass


class PredicateMappingError(Exception):
    """
    Raised when an error occurs in a registry's internal mapping.
    """

    pass


class NoEntriesFound(ValueError, PredicateMappingError):
    """
    Raised when no registration is found for a type string in a registry's
    internal mapping.

    .. warning::

        In a future version of ``eth-abi``, this error class will no longer
        inherit from ``ValueError``.
    """

    pass


class MultipleEntriesFound(ValueError, PredicateMappingError):
    """
    Raised when multiple registrations are found for a type string in a
    registry's internal mapping. This error is non-recoverable and indicates
    that a registry was configured incorrectly. Registrations are expected to
    cover completely distinct ranges of type strings.

    .. warning::

        In a future version of ``eth-abi``, this error class will no longer
        inherit from ``ValueError``.
    """

    pass
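# Illustrative: ValueOutOfBounds subclasses IllegalValue, which subclasses
# EncodingError, so callers can catch at whichever granularity suits them
# (`encode` here is the top-level helper re-exported by this package).
from . import encode

try:
    encode(['uint8'], [256])  # 256 does not fit in 8 bits
except EncodingError as err:
    assert isinstance(err, ValueOutOfBounds)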
443
ccxt/static_dependencies/ethereum/abi/grammar.py
Normal file
443
ccxt/static_dependencies/ethereum/abi/grammar.py
Normal file
@@ -0,0 +1,443 @@
|
||||
import functools
|
||||
import re
|
||||
|
||||
from ...parsimonious import (
|
||||
expressions,
|
||||
ParseError,
|
||||
NodeVisitor,
|
||||
Grammar
|
||||
)
|
||||
|
||||
from .exceptions import (
|
||||
ABITypeError,
|
||||
ParseError,
|
||||
)
|
||||
|
||||
grammar = Grammar(
|
||||
r"""
|
||||
type = tuple_type / basic_type
|
||||
|
||||
tuple_type = components arrlist?
|
||||
components = non_zero_tuple / zero_tuple
|
||||
|
||||
non_zero_tuple = "(" type next_type* ")"
|
||||
next_type = "," type
|
||||
|
||||
zero_tuple = "()"
|
||||
|
||||
basic_type = base sub? arrlist?
|
||||
|
||||
base = alphas
|
||||
|
||||
sub = two_size / digits
|
||||
two_size = (digits "x" digits)
|
||||
|
||||
arrlist = (const_arr / dynam_arr)+
|
||||
const_arr = "[" digits "]"
|
||||
dynam_arr = "[]"
|
||||
|
||||
alphas = ~"[A-Za-z]+"
|
||||
digits = ~"[1-9][0-9]*"
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
class NodeVisitor(NodeVisitor):
|
||||
"""
|
||||
Parsimonious node visitor which performs both parsing of type strings and
|
||||
post-processing of parse trees. Parsing operations are cached.
|
||||
"""
|
||||
|
||||
grammar = grammar
|
||||
|
||||
def visit_non_zero_tuple(self, node, visited_children):
|
||||
# Ignore left and right parens
|
||||
_, first, rest, _ = visited_children
|
||||
|
||||
return (first,) + rest
|
||||
|
||||
def visit_tuple_type(self, node, visited_children):
|
||||
components, arrlist = visited_children
|
||||
|
||||
return TupleType(components, arrlist, node=node)
|
||||
|
||||
def visit_next_type(self, node, visited_children):
|
||||
# Ignore comma
|
||||
_, abi_type = visited_children
|
||||
|
||||
return abi_type
|
||||
|
||||
def visit_zero_tuple(self, node, visited_children):
|
||||
return tuple()
|
||||
|
||||
def visit_basic_type(self, node, visited_children):
|
||||
base, sub, arrlist = visited_children
|
||||
|
||||
return BasicType(base, sub, arrlist, node=node)
|
||||
|
||||
def visit_two_size(self, node, visited_children):
|
||||
# Ignore "x"
|
||||
first, _, second = visited_children
|
||||
|
||||
return first, second
|
||||
|
||||
def visit_const_arr(self, node, visited_children):
|
||||
# Ignore left and right brackets
|
||||
_, int_value, _ = visited_children
|
||||
|
||||
return (int_value,)
|
||||
|
||||
def visit_dynam_arr(self, node, visited_children):
|
||||
return tuple()
|
||||
|
||||
def visit_alphas(self, node, visited_children):
|
||||
return node.text
|
||||
|
||||
def visit_digits(self, node, visited_children):
|
||||
return int(node.text)
|
||||
|
||||
def generic_visit(self, node, visited_children):
|
||||
if isinstance(node.expr, expressions.OneOf):
|
||||
# Unwrap value chosen from alternatives
|
||||
return visited_children[0]
|
||||
if isinstance(node.expr, expressions.Optional):
|
||||
# Unwrap optional value or return `None`
|
||||
if len(visited_children) != 0:
|
||||
return visited_children[0]
|
||||
|
||||
return None
|
||||
|
||||
return tuple(visited_children)
|
||||
|
||||
@functools.lru_cache(maxsize=None)
|
||||
def parse(self, type_str):
|
||||
"""
|
||||
Parses a type string into an appropriate instance of
|
||||
:class:`~eth_abi.grammar.ABIType`. If a type string cannot be parsed,
|
||||
throws :class:`~eth_abi.exceptions.ParseError`.
|
||||
|
||||
:param type_str: The type string to be parsed.
|
||||
:returns: An instance of :class:`~eth_abi.grammar.ABIType` containing
|
||||
information about the parsed type string.
|
||||
"""
|
||||
if not isinstance(type_str, str):
|
||||
raise TypeError(
|
||||
"Can only parse string values: got {}".format(type(type_str))
|
||||
)
|
||||
|
||||
try:
|
||||
return super().parse(type_str)
|
||||
except ParsimoniousParseError as e:
|
||||
raise ParseError(e.text, e.pos, e.expr)
|
||||
|
||||
|
||||
visitor = NodeVisitor()
|
||||
|
||||
|
||||
class ABIType:
|
||||
"""
|
||||
Base class for results of type string parsing operations.
|
||||
"""
|
||||
|
||||
__slots__ = ("arrlist", "node")
|
||||
|
||||
def __init__(self, arrlist=None, node=None):
|
||||
self.arrlist = arrlist
|
||||
"""
|
||||
The list of array dimensions for a parsed type. Equal to ``None`` if
|
||||
type string has no array dimensions.
|
||||
"""
|
||||
|
||||
self.node = node
|
||||
"""
|
||||
The parsimonious ``Node`` instance associated with this parsed type.
|
||||
Used to generate error messages for invalid types.
|
||||
"""
|
||||
|
||||
def __repr__(self): # pragma: no cover
|
||||
return "<{} {}>".format(
|
||||
type(self).__qualname__,
|
||||
repr(self.to_type_str()),
|
||||
)
|
||||
|
||||
def __eq__(self, other):
|
||||
# Two ABI types are equal if their string representations are equal
|
||||
return type(self) is type(other) and self.to_type_str() == other.to_type_str()
|
||||
|
||||
def to_type_str(self): # pragma: no cover
|
||||
"""
|
||||
Returns the string representation of an ABI type. This will be equal to
|
||||
the type string from which it was created.
|
||||
"""
|
||||
raise NotImplementedError("Must implement `to_type_str`")
|
||||
|
||||
@property
|
||||
def item_type(self):
|
||||
"""
|
||||
If this type is an array type, equal to an appropriate
|
||||
:class:`~eth_abi.grammar.ABIType` instance for the array's items.
|
||||
"""
|
||||
raise NotImplementedError("Must implement `item_type`")
|
||||
|
||||
def validate(self): # pragma: no cover
|
||||
"""
|
||||
Validates the properties of an ABI type against the solidity ABI spec:
|
||||
|
||||
https://solidity.readthedocs.io/en/develop/abi-spec.html
|
||||
|
||||
Raises :class:`~eth_abi.exceptions.ABITypeError` if validation fails.
|
||||
"""
|
||||
raise NotImplementedError("Must implement `validate`")
|
||||
|
||||
def invalidate(self, error_msg):
|
||||
# Invalidates an ABI type with the given error message. Expects that a
|
||||
# parsimonious node was provided from the original parsing operation
|
||||
# that yielded this type.
|
||||
node = self.node
|
||||
|
||||
raise ABITypeError(
|
||||
"For '{comp_str}' type at column {col} "
|
||||
"in '{type_str}': {error_msg}".format(
|
||||
comp_str=node.text,
|
||||
col=node.start + 1,
|
||||
type_str=node.full_text,
|
||||
error_msg=error_msg,
|
||||
),
|
||||
)
|
||||
|
||||
@property
|
||||
def is_array(self):
|
||||
"""
|
||||
Equal to ``True`` if a type is an array type (i.e. if it has an array
|
||||
dimension list). Otherwise, equal to ``False``.
|
||||
"""
|
||||
return self.arrlist is not None
|
||||
|
||||
@property
|
||||
def is_dynamic(self):
|
||||
"""
|
||||
Equal to ``True`` if a type has a dynamically sized encoding.
|
||||
Otherwise, equal to ``False``.
|
||||
"""
|
||||
raise NotImplementedError("Must implement `is_dynamic`")
|
||||
|
||||
@property
|
||||
def _has_dynamic_arrlist(self):
|
||||
return self.is_array and any(len(dim) == 0 for dim in self.arrlist)
|
||||
|
||||
|
||||
class TupleType(ABIType):
|
||||
"""
|
||||
Represents the result of parsing a tuple type string e.g. "(int,bool)".
|
||||
"""
|
||||
|
||||
__slots__ = ("components",)
|
||||
|
||||
def __init__(self, components, arrlist=None, *, node=None):
|
||||
super().__init__(arrlist, node)
|
||||
|
||||
self.components = components
|
||||
"""
|
||||
A tuple of :class:`~eth_abi.grammar.ABIType` instances for each of the
|
||||
tuple type's components.
|
||||
"""
|
||||
|
||||
def to_type_str(self):
|
||||
arrlist = self.arrlist
|
||||
|
||||
if isinstance(arrlist, tuple):
|
||||
arrlist = "".join(repr(list(a)) for a in arrlist)
|
||||
else:
|
||||
arrlist = ""
|
||||
|
||||
return "({}){}".format(
|
||||
",".join(c.to_type_str() for c in self.components),
|
||||
arrlist,
|
||||
)
|
||||
|
||||
@property
|
||||
def item_type(self):
|
||||
if not self.is_array:
|
||||
raise ValueError(
|
||||
"Cannot determine item type for non-array type '{}'".format(
|
||||
self.to_type_str(),
|
||||
)
|
||||
)
|
||||
|
||||
return type(self)(
|
||||
self.components,
|
||||
self.arrlist[:-1] or None,
|
||||
node=self.node,
|
||||
)
|
||||
|
||||
def validate(self):
|
||||
for c in self.components:
|
||||
c.validate()
|
||||
|
||||
@property
|
||||
def is_dynamic(self):
|
||||
if self._has_dynamic_arrlist:
|
||||
return True
|
||||
|
||||
return any(c.is_dynamic for c in self.components)
|
||||
|
||||
|
||||
class BasicType(ABIType):
|
||||
"""
|
||||
Represents the result of parsing a basic type string e.g. "uint", "address",
|
||||
"ufixed128x19[][2]".
|
||||
"""
|
||||
|
||||
__slots__ = ("base", "sub")
|
||||
|
||||
def __init__(self, base, sub=None, arrlist=None, *, node=None):
|
||||
super().__init__(arrlist, node)
|
||||
|
||||
self.base = base
|
||||
"""The base of a basic type e.g. "uint" for "uint256" etc."""
|
||||
|
||||
self.sub = sub
|
||||
"""
|
||||
The sub type of a basic type e.g. ``256`` for "uint256" or ``(128, 18)``
|
||||
for "ufixed128x18" etc. Equal to ``None`` if type string has no sub
|
||||
type.
|
||||
"""
|
||||
|
||||
def to_type_str(self):
|
||||
sub, arrlist = self.sub, self.arrlist
|
||||
|
||||
if isinstance(sub, int):
|
||||
sub = str(sub)
|
||||
elif isinstance(sub, tuple):
|
||||
sub = "x".join(str(s) for s in sub)
|
||||
else:
|
||||
sub = ""
|
||||
|
||||
if isinstance(arrlist, tuple):
|
||||
arrlist = "".join(repr(list(a)) for a in arrlist)
|
||||
else:
|
||||
arrlist = ""
|
||||
|
||||
return self.base + sub + arrlist
|
||||
|
||||
@property
|
||||
def item_type(self):
|
||||
if not self.is_array:
|
||||
raise ValueError(
|
||||
"Cannot determine item type for non-array type '{}'".format(
|
||||
self.to_type_str(),
|
||||
)
|
||||
)
|
||||
|
||||
return type(self)(
|
||||
self.base,
|
||||
self.sub,
|
||||
self.arrlist[:-1] or None,
|
||||
node=self.node,
|
||||
)
|
||||
|
||||
@property
|
||||
def is_dynamic(self):
|
||||
if self._has_dynamic_arrlist:
|
||||
return True
|
||||
|
||||
if self.base == "string":
|
||||
return True
|
||||
|
||||
if self.base == "bytes" and self.sub is None:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def validate(self):
|
||||
base, sub = self.base, self.sub
|
||||
|
||||
# Check validity of string type
|
||||
if base == "string":
|
||||
if sub is not None:
|
||||
self.invalidate("string type cannot have suffix")
|
||||
|
||||
# Check validity of bytes type
|
||||
elif base == "bytes":
|
||||
if not (sub is None or isinstance(sub, int)):
|
||||
self.invalidate(
|
||||
"bytes type must have either no suffix or a numerical suffix"
|
||||
)
|
||||
|
||||
if isinstance(sub, int) and sub > 32:
|
||||
self.invalidate("maximum 32 bytes for fixed-length bytes")
|
||||
|
||||
# Check validity of integer type
|
||||
elif base in ("int", "uint"):
|
||||
if not isinstance(sub, int):
|
||||
self.invalidate("integer type must have numerical suffix")
|
||||
|
||||
if sub < 8 or 256 < sub:
|
||||
self.invalidate("integer size out of bounds (max 256 bits)")
|
||||
|
||||
if sub % 8 != 0:
|
||||
self.invalidate("integer size must be multiple of 8")
|
||||
|
||||
# Check validity of fixed type
|
||||
elif base in ("fixed", "ufixed"):
|
||||
if not isinstance(sub, tuple):
|
||||
self.invalidate(
|
||||
"fixed type must have suffix of form <bits>x<exponent>, "
|
||||
"e.g. 128x19",
|
||||
)
|
||||
|
||||
bits, minus_e = sub
|
||||
|
||||
if bits < 8 or 256 < bits:
|
||||
self.invalidate("fixed size out of bounds (max 256 bits)")
|
||||
|
||||
if bits % 8 != 0:
|
||||
self.invalidate("fixed size must be multiple of 8")
|
||||
|
||||
if minus_e < 1 or 80 < minus_e:
|
||||
self.invalidate(
|
||||
"fixed exponent size out of bounds, {} must be in 1-80".format(
|
||||
minus_e,
|
||||
),
|
||||
)
|
||||
|
||||
# Check validity of hash type
|
||||
elif base == "hash":
|
||||
if not isinstance(sub, int):
|
||||
self.invalidate("hash type must have numerical suffix")
|
||||
|
||||
# Check validity of address type
|
||||
elif base == "address":
|
||||
if sub is not None:
|
||||
self.invalidate("address cannot have suffix")
|
||||
|
||||
|
||||
TYPE_ALIASES = {
|
||||
"int": "int256",
|
||||
"uint": "uint256",
|
||||
"fixed": "fixed128x18",
|
||||
"ufixed": "ufixed128x18",
|
||||
"function": "bytes24",
|
||||
"byte": "bytes1",
|
||||
}
|
||||
|
||||
TYPE_ALIAS_RE = re.compile(
|
||||
r"\b({})\b".format("|".join(re.escape(a) for a in TYPE_ALIASES.keys()))
|
||||
)
|
||||
|
||||
|
||||
def normalize(type_str):
|
||||
"""
|
||||
Normalizes a type string into its canonical version e.g. the type string
|
||||
'int' becomes 'int256', etc.
|
||||
|
||||
:param type_str: The type string to be normalized.
|
||||
:returns: The canonical version of the input type string.
|
||||
"""
|
||||
return TYPE_ALIAS_RE.sub(
|
||||
lambda match: TYPE_ALIASES[match.group(0)],
|
||||
type_str,
|
||||
)
|
||||
|
||||
|
||||
parse = visitor.parse
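Roughly how the public helpers in this module behave (an illustrative sketch, not exhaustive):

normalize('uint')                 # 'uint256'
normalize('(int,ufixed)[2]')      # '(int256,ufixed128x18)[2]'

abi_type = parse('uint256[3][]')
abi_type.is_array                 # True
abi_type.is_dynamic               # True: the outer dimension is unbounded
abi_type.item_type.to_type_str()  # 'uint256[3]'
parse(normalize('fixed')).sub     # (128, 18)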
|
||||
13
ccxt/static_dependencies/ethereum/abi/packed.py
Normal file
@@ -0,0 +1,13 @@
|
||||
from .codec import (
|
||||
ABIEncoder,
|
||||
)
|
||||
from .registry import (
|
||||
registry_packed,
|
||||
)
|
||||
|
||||
default_encoder_packed = ABIEncoder(registry_packed)
|
||||
|
||||
encode_packed = default_encoder_packed.encode
|
||||
is_encodable_packed = default_encoder_packed.is_encodable
|
||||
encode_single_packed = default_encoder_packed.encode_single # deprecated
|
||||
encode_abi_packed = default_encoder_packed.encode_abi # deprecated
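A sketch of the packed (non-standard) encoding exposed here, assuming the vendored import path:

from ccxt.static_dependencies.ethereum.abi.packed import encode_packed

encode_packed(['uint16', 'bytes'], [0x1234, b'\x01\x02'])
# b'\x12\x34\x01\x02' -- values are concatenated without 32-byte padding or
# length prefixes, matching Solidity's abi.encodePacked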
|
||||
0
ccxt/static_dependencies/ethereum/abi/py.typed
Normal file
643
ccxt/static_dependencies/ethereum/abi/registry.py
Normal file
@@ -0,0 +1,643 @@
|
||||
import abc
|
||||
import copy
|
||||
import functools
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Type,
|
||||
Union,
|
||||
)
|
||||
|
||||
from ..typing import (
|
||||
abi,
|
||||
)
|
||||
|
||||
from . import (
|
||||
decoding,
|
||||
encoding,
|
||||
exceptions,
|
||||
grammar,
|
||||
)
|
||||
from .base import (
|
||||
BaseCoder,
|
||||
)
|
||||
from .exceptions import (
|
||||
ABITypeError,
|
||||
MultipleEntriesFound,
|
||||
NoEntriesFound,
|
||||
)
|
||||
|
||||
Lookup = Union[abi.TypeStr, Callable[[abi.TypeStr], bool]]
|
||||
|
||||
EncoderCallable = Callable[[Any], bytes]
|
||||
DecoderCallable = Callable[[decoding.ContextFramesBytesIO], Any]
|
||||
|
||||
Encoder = Union[EncoderCallable, Type[encoding.BaseEncoder]]
|
||||
Decoder = Union[DecoderCallable, Type[decoding.BaseDecoder]]
|
||||
|
||||
|
||||
class Copyable(abc.ABC):
|
||||
@abc.abstractmethod
|
||||
def copy(self):
|
||||
pass
|
||||
|
||||
def __copy__(self):
|
||||
return self.copy()
|
||||
|
||||
def __deepcopy__(self, *args):
|
||||
return self.copy()
|
||||
|
||||
|
||||
class PredicateMapping(Copyable):
|
||||
"""
|
||||
Acts as a mapping from predicate functions to values. Values are retrieved
|
||||
when their corresponding predicate matches a given input. Predicates can
|
||||
also be labeled to facilitate removal from the mapping.
|
||||
"""
|
||||
|
||||
def __init__(self, name):
|
||||
self._name = name
|
||||
self._values = {}
|
||||
self._labeled_predicates = {}
|
||||
|
||||
def add(self, predicate, value, label=None):
|
||||
if predicate in self._values:
|
||||
raise ValueError(
|
||||
"Matcher {} already exists in {}".format(
|
||||
repr(predicate),
|
||||
self._name,
|
||||
)
|
||||
)
|
||||
|
||||
if label is not None:
|
||||
if label in self._labeled_predicates:
|
||||
raise ValueError(
|
||||
"Matcher {} with label '{}' already exists in {}".format(
|
||||
repr(predicate),
|
||||
label,
|
||||
self._name,
|
||||
),
|
||||
)
|
||||
|
||||
self._labeled_predicates[label] = predicate
|
||||
|
||||
self._values[predicate] = value
|
||||
|
||||
def find(self, type_str):
|
||||
results = tuple(
|
||||
(predicate, value)
|
||||
for predicate, value in self._values.items()
|
||||
if predicate(type_str)
|
||||
)
|
||||
|
||||
if len(results) == 0:
|
||||
raise NoEntriesFound(
|
||||
"No matching entries for '{}' in {}".format(
|
||||
type_str,
|
||||
self._name,
|
||||
)
|
||||
)
|
||||
|
||||
predicates, values = tuple(zip(*results))
|
||||
|
||||
if len(results) > 1:
|
||||
predicate_reprs = ", ".join(map(repr, predicates))
|
||||
raise MultipleEntriesFound(
|
||||
f"Multiple matching entries for '{type_str}' in {self._name}: "
|
||||
f"{predicate_reprs}. This occurs when two registrations match the "
|
||||
"same type string. You may need to delete one of the "
|
||||
"registrations or modify its matching behavior to ensure it "
|
||||
'doesn\'t collide with other registrations. See the "Registry" '
|
||||
"documentation for more information."
|
||||
)
|
||||
|
||||
return values[0]
|
||||
|
||||
def remove_by_equality(self, predicate):
|
||||
# Delete the predicate mapping to the previously stored value
|
||||
try:
|
||||
del self._values[predicate]
|
||||
except KeyError:
|
||||
raise KeyError(
|
||||
"Matcher {} not found in {}".format(
|
||||
repr(predicate),
|
||||
self._name,
|
||||
)
|
||||
)
|
||||
|
||||
# Delete any label which refers to this predicate
|
||||
try:
|
||||
label = self._label_for_predicate(predicate)
|
||||
except ValueError:
|
||||
pass
|
||||
else:
|
||||
del self._labeled_predicates[label]
|
||||
|
||||
def _label_for_predicate(self, predicate):
|
||||
# Both keys and values in `_labeled_predicates` are unique since the
|
||||
# `add` method enforces this
|
||||
for key, value in self._labeled_predicates.items():
|
||||
if value is predicate:
|
||||
return key
|
||||
|
||||
raise ValueError(
|
||||
"Matcher {} not referred to by any label in {}".format(
|
||||
repr(predicate),
|
||||
self._name,
|
||||
)
|
||||
)
|
||||
|
||||
def remove_by_label(self, label):
|
||||
try:
|
||||
predicate = self._labeled_predicates[label]
|
||||
except KeyError:
|
||||
raise KeyError("Label '{}' not found in {}".format(label, self._name))
|
||||
|
||||
del self._labeled_predicates[label]
|
||||
del self._values[predicate]
|
||||
|
||||
def remove(self, predicate_or_label):
|
||||
if callable(predicate_or_label):
|
||||
self.remove_by_equality(predicate_or_label)
|
||||
elif isinstance(predicate_or_label, str):
|
||||
self.remove_by_label(predicate_or_label)
|
||||
else:
|
||||
raise TypeError(
|
||||
"Key to be removed must be callable or string: got {}".format(
|
||||
type(predicate_or_label),
|
||||
)
|
||||
)
|
||||
|
||||
def copy(self):
|
||||
cpy = type(self)(self._name)
|
||||
|
||||
cpy._values = copy.copy(self._values)
|
||||
cpy._labeled_predicates = copy.copy(self._labeled_predicates)
|
||||
|
||||
return cpy
|
||||
|
||||
|
||||
class Predicate:
|
||||
"""
|
||||
Represents a predicate function to be used for type matching in
|
||||
``ABIRegistry``.
|
||||
"""
|
||||
|
||||
__slots__ = tuple()
|
||||
|
||||
def __call__(self, *args, **kwargs): # pragma: no cover
|
||||
raise NotImplementedError("Must implement `__call__`")
|
||||
|
||||
def __str__(self): # pragma: no cover
|
||||
raise NotImplementedError("Must implement `__str__`")
|
||||
|
||||
def __repr__(self):
|
||||
return "<{} {}>".format(type(self).__name__, self)
|
||||
|
||||
def __iter__(self):
|
||||
for attr in self.__slots__:
|
||||
yield getattr(self, attr)
|
||||
|
||||
def __hash__(self):
|
||||
return hash(tuple(self))
|
||||
|
||||
def __eq__(self, other):
|
||||
return type(self) is type(other) and tuple(self) == tuple(other)
|
||||
|
||||
|
||||
class Equals(Predicate):
|
||||
"""
|
||||
A predicate that matches any input equal to `value`.
|
||||
"""
|
||||
|
||||
__slots__ = ("value",)
|
||||
|
||||
def __init__(self, value):
|
||||
self.value = value
|
||||
|
||||
def __call__(self, other):
|
||||
return self.value == other
|
||||
|
||||
def __str__(self):
|
||||
return "(== {})".format(repr(self.value))
|
||||
|
||||
|
||||
class BaseEquals(Predicate):
|
||||
"""
|
||||
A predicate that matches a basic type string with a base component equal to
|
||||
`value` and no array component. If `with_sub` is `True`, the type string
|
||||
must have a sub component to match. If `with_sub` is `False`, the type
|
||||
string must *not* have a sub component to match. If `with_sub` is None,
|
||||
the type string's sub component is ignored.
|
||||
"""
|
||||
|
||||
__slots__ = ("base", "with_sub")
|
||||
|
||||
def __init__(self, base, *, with_sub=None):
|
||||
self.base = base
|
||||
self.with_sub = with_sub
|
||||
|
||||
def __call__(self, type_str):
|
||||
try:
|
||||
abi_type = grammar.parse(type_str)
|
||||
except exceptions.ParseError:
|
||||
return False
|
||||
|
||||
if isinstance(abi_type, grammar.BasicType):
|
||||
if abi_type.arrlist is not None:
|
||||
return False
|
||||
|
||||
if self.with_sub is not None:
|
||||
if self.with_sub and abi_type.sub is None:
|
||||
return False
|
||||
if not self.with_sub and abi_type.sub is not None:
|
||||
return False
|
||||
|
||||
return abi_type.base == self.base
|
||||
|
||||
# We'd reach this point if `type_str` did not contain a basic type
|
||||
# e.g. if it contained a tuple type
|
||||
return False
|
||||
|
||||
def __str__(self):
|
||||
return "(base == {}{})".format(
|
||||
repr(self.base),
|
||||
""
|
||||
if self.with_sub is None
|
||||
else (" and sub is not None" if self.with_sub else " and sub is None"),
|
||||
)
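How these predicates are meant to classify type strings (illustrative):

Equals('uint256')('uint256')                    # True: exact string match
BaseEquals('uint')('uint256')                   # True: base matches, any sub
BaseEquals('bytes', with_sub=True)('bytes32')   # True
BaseEquals('bytes', with_sub=False)('bytes32')  # False: sub is present
BaseEquals('uint')('uint256[2]')                # False: array component present
BaseEquals('uint')('(uint256,bool)')            # False: not a basic type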
|
||||
|
||||
|
||||
def has_arrlist(type_str):
|
||||
"""
|
||||
A predicate that matches a type string with an array dimension list.
|
||||
"""
|
||||
try:
|
||||
abi_type = grammar.parse(type_str)
|
||||
except exceptions.ParseError:
|
||||
return False
|
||||
|
||||
return abi_type.arrlist is not None
|
||||
|
||||
|
||||
def is_base_tuple(type_str):
|
||||
"""
|
||||
A predicate that matches a tuple type with no array dimension list.
|
||||
"""
|
||||
try:
|
||||
abi_type = grammar.parse(type_str)
|
||||
except exceptions.ParseError:
|
||||
return False
|
||||
|
||||
return isinstance(abi_type, grammar.TupleType) and abi_type.arrlist is None
|
||||
|
||||
|
||||
def _clear_encoder_cache(old_method):
|
||||
@functools.wraps(old_method)
|
||||
def new_method(self, *args, **kwargs):
|
||||
self.get_encoder.cache_clear()
|
||||
return old_method(self, *args, **kwargs)
|
||||
|
||||
return new_method
|
||||
|
||||
|
||||
def _clear_decoder_cache(old_method):
|
||||
@functools.wraps(old_method)
|
||||
def new_method(self, *args, **kwargs):
|
||||
self.get_decoder.cache_clear()
|
||||
return old_method(self, *args, **kwargs)
|
||||
|
||||
return new_method
|
||||
|
||||
|
||||
class BaseRegistry:
|
||||
@staticmethod
|
||||
def _register(mapping, lookup, value, label=None):
|
||||
if callable(lookup):
|
||||
mapping.add(lookup, value, label)
|
||||
return
|
||||
|
||||
if isinstance(lookup, str):
|
||||
mapping.add(Equals(lookup), value, lookup)
|
||||
return
|
||||
|
||||
raise TypeError(
|
||||
"Lookup must be a callable or a value of type `str`: got {}".format(
|
||||
repr(lookup),
|
||||
)
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _unregister(mapping, lookup_or_label):
|
||||
if callable(lookup_or_label):
|
||||
mapping.remove_by_equality(lookup_or_label)
|
||||
return
|
||||
|
||||
if isinstance(lookup_or_label, str):
|
||||
mapping.remove_by_label(lookup_or_label)
|
||||
return
|
||||
|
||||
raise TypeError(
|
||||
"Lookup/label must be a callable or a value of type `str`: got {}".format(
|
||||
repr(lookup_or_label),
|
||||
)
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _get_registration(mapping, type_str):
|
||||
try:
|
||||
value = mapping.find(type_str)
|
||||
except ValueError as e:
|
||||
if "No matching" in e.args[0]:
|
||||
# If no matches found, attempt to parse in case lack of matches
|
||||
# was due to unparsability
|
||||
grammar.parse(type_str)
|
||||
|
||||
raise
|
||||
|
||||
return value
|
||||
|
||||
|
||||
class ABIRegistry(Copyable, BaseRegistry):
|
||||
def __init__(self):
|
||||
self._encoders = PredicateMapping("encoder registry")
|
||||
self._decoders = PredicateMapping("decoder registry")
|
||||
|
||||
def _get_registration(self, mapping, type_str):
|
||||
coder = super()._get_registration(mapping, type_str)
|
||||
|
||||
if isinstance(coder, type) and issubclass(coder, BaseCoder):
|
||||
return coder.from_type_str(type_str, self)
|
||||
|
||||
return coder
|
||||
|
||||
@_clear_encoder_cache
|
||||
def register_encoder(
|
||||
self, lookup: Lookup, encoder: Encoder, label: str = None
|
||||
) -> None:
|
||||
"""
|
||||
Registers the given ``encoder`` under the given ``lookup``. A unique
|
||||
string label may be optionally provided that can be used to refer to
|
||||
the registration by name. For more information about arguments, refer
|
||||
to :any:`register`.
|
||||
"""
|
||||
self._register(self._encoders, lookup, encoder, label=label)
|
||||
|
||||
@_clear_encoder_cache
|
||||
def unregister_encoder(self, lookup_or_label: Lookup) -> None:
|
||||
"""
|
||||
Unregisters an encoder in the registry with the given lookup or label.
|
||||
If ``lookup_or_label`` is a string, the encoder with the label
|
||||
``lookup_or_label`` will be unregistered. If it is a function, the
|
||||
encoder with the lookup function ``lookup_or_label`` will be
|
||||
unregistered.
|
||||
"""
|
||||
self._unregister(self._encoders, lookup_or_label)
|
||||
|
||||
@_clear_decoder_cache
|
||||
def register_decoder(
|
||||
self, lookup: Lookup, decoder: Decoder, label: str = None
|
||||
) -> None:
|
||||
"""
|
||||
Registers the given ``decoder`` under the given ``lookup``. A unique
|
||||
string label may be optionally provided that can be used to refer to
|
||||
the registration by name. For more information about arguments, refer
|
||||
to :any:`register`.
|
||||
"""
|
||||
self._register(self._decoders, lookup, decoder, label=label)
|
||||
|
||||
@_clear_decoder_cache
|
||||
def unregister_decoder(self, lookup_or_label: Lookup) -> None:
|
||||
"""
|
||||
Unregisters a decoder in the registry with the given lookup or label.
|
||||
If ``lookup_or_label`` is a string, the decoder with the label
|
||||
``lookup_or_label`` will be unregistered. If it is a function, the
|
||||
decoder with the lookup function ``lookup_or_label`` will be
|
||||
unregistered.
|
||||
"""
|
||||
self._unregister(self._decoders, lookup_or_label)
|
||||
|
||||
def register(
|
||||
self, lookup: Lookup, encoder: Encoder, decoder: Decoder, label: str = None
|
||||
) -> None:
|
||||
"""
|
||||
Registers the given ``encoder`` and ``decoder`` under the given
|
||||
``lookup``. A unique string label may be optionally provided that can
|
||||
be used to refer to the registration by name.
|
||||
|
||||
:param lookup: A type string or type string matcher function
|
||||
(predicate). When the registry is queried with a type string
|
||||
``query`` to determine which encoder or decoder to use, ``query``
|
||||
will be checked against every registration in the registry. If a
|
||||
registration was created with a type string for ``lookup``, it will
|
||||
be considered a match if ``lookup == query``. If a registration
|
||||
was created with a matcher function for ``lookup``, it will be
|
||||
considered a match if ``lookup(query) is True``. If more than one
|
||||
registration is found to be a match, then an exception is raised.
|
||||
|
||||
:param encoder: An encoder callable or class to use if ``lookup``
|
||||
matches a query. If ``encoder`` is a callable, it must accept a
|
||||
python value and return a ``bytes`` value. If ``encoder`` is a
|
||||
class, it must be a valid subclass of :any:`encoding.BaseEncoder`
|
||||
and must also implement the :any:`from_type_str` method on
|
||||
:any:`base.BaseCoder`.
|
||||
|
||||
:param decoder: A decoder callable or class to use if ``lookup``
|
||||
matches a query. If ``decoder`` is a callable, it must accept a
|
||||
stream-like object of bytes and return a python value. If
|
||||
``decoder`` is a class, it must be a valid subclass of
|
||||
:any:`decoding.BaseDecoder` and must also implement the
|
||||
:any:`from_type_str` method on :any:`base.BaseCoder`.
|
||||
|
||||
:param label: An optional label that can be used to refer to this
|
||||
registration by name. This label can be used to unregister an
|
||||
entry in the registry via the :any:`unregister` method and its
|
||||
variants.
|
||||
"""
|
||||
self.register_encoder(lookup, encoder, label=label)
|
||||
self.register_decoder(lookup, decoder, label=label)
|
||||
|
||||
def unregister(self, label: str) -> None:
|
||||
"""
|
||||
Unregisters the entries in the encoder and decoder registries which
|
||||
have the label ``label``.
|
||||
"""
|
||||
self.unregister_encoder(label)
|
||||
self.unregister_decoder(label)
|
||||
|
||||
@functools.lru_cache(maxsize=None)
|
||||
def get_encoder(self, type_str):
|
||||
return self._get_registration(self._encoders, type_str)
|
||||
|
||||
def has_encoder(self, type_str: abi.TypeStr) -> bool:
|
||||
"""
|
||||
Returns ``True`` if an encoder is found for the given type string
|
||||
``type_str``. Otherwise, returns ``False``. Raises
|
||||
:class:`~eth_abi.exceptions.MultipleEntriesFound` if multiple encoders
|
||||
are found.
|
||||
"""
|
||||
try:
|
||||
self.get_encoder(type_str)
|
||||
except (ABITypeError, NoEntriesFound):
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
@functools.lru_cache(maxsize=None)
|
||||
def get_decoder(self, type_str):
|
||||
return self._get_registration(self._decoders, type_str)
|
||||
|
||||
def copy(self):
|
||||
"""
|
||||
Copies a registry such that new registrations can be made or existing
|
||||
registrations can be unregistered without affecting any instance from
|
||||
which a copy was obtained. This is useful if an existing registry
|
||||
fulfills most of a user's needs but requires one or two modifications.
|
||||
In that case, a copy of that registry can be obtained and the necessary
|
||||
changes made without affecting the original registry.
|
||||
"""
|
||||
cpy = type(self)()
|
||||
|
||||
cpy._encoders = copy.copy(self._encoders)
|
||||
cpy._decoders = copy.copy(self._decoders)
|
||||
|
||||
return cpy
|
||||
|
||||
|
||||
registry = ABIRegistry()
|
||||
|
||||
registry.register(
|
||||
BaseEquals("uint"),
|
||||
encoding.UnsignedIntegerEncoder,
|
||||
decoding.UnsignedIntegerDecoder,
|
||||
label="uint",
|
||||
)
|
||||
registry.register(
|
||||
BaseEquals("int"),
|
||||
encoding.SignedIntegerEncoder,
|
||||
decoding.SignedIntegerDecoder,
|
||||
label="int",
|
||||
)
|
||||
registry.register(
|
||||
BaseEquals("address"),
|
||||
encoding.AddressEncoder,
|
||||
decoding.AddressDecoder,
|
||||
label="address",
|
||||
)
|
||||
registry.register(
|
||||
BaseEquals("bool"),
|
||||
encoding.BooleanEncoder,
|
||||
decoding.BooleanDecoder,
|
||||
label="bool",
|
||||
)
|
||||
registry.register(
|
||||
BaseEquals("ufixed"),
|
||||
encoding.UnsignedFixedEncoder,
|
||||
decoding.UnsignedFixedDecoder,
|
||||
label="ufixed",
|
||||
)
|
||||
registry.register(
|
||||
BaseEquals("fixed"),
|
||||
encoding.SignedFixedEncoder,
|
||||
decoding.SignedFixedDecoder,
|
||||
label="fixed",
|
||||
)
|
||||
registry.register(
|
||||
BaseEquals("bytes", with_sub=True),
|
||||
encoding.BytesEncoder,
|
||||
decoding.BytesDecoder,
|
||||
label="bytes<M>",
|
||||
)
|
||||
registry.register(
|
||||
BaseEquals("bytes", with_sub=False),
|
||||
encoding.ByteStringEncoder,
|
||||
decoding.ByteStringDecoder,
|
||||
label="bytes",
|
||||
)
|
||||
registry.register(
|
||||
BaseEquals("function"),
|
||||
encoding.BytesEncoder,
|
||||
decoding.BytesDecoder,
|
||||
label="function",
|
||||
)
|
||||
registry.register(
|
||||
BaseEquals("string"),
|
||||
encoding.TextStringEncoder,
|
||||
decoding.StringDecoder,
|
||||
label="string",
|
||||
)
|
||||
registry.register(
|
||||
has_arrlist,
|
||||
encoding.BaseArrayEncoder,
|
||||
decoding.BaseArrayDecoder,
|
||||
label="has_arrlist",
|
||||
)
|
||||
registry.register(
|
||||
is_base_tuple,
|
||||
encoding.TupleEncoder,
|
||||
decoding.TupleDecoder,
|
||||
label="is_base_tuple",
|
||||
)
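A sketch of extending the default registry without mutating it, as the ``copy`` docstring above suggests. ``ABICodec`` lives in the sibling ``codec`` module (not shown in this hunk), and the re-registered coders are just the defaults, used here as stand-ins for custom ones:

my_registry = registry.copy()
my_registry.unregister('string')  # drop the default 'string' registration
my_registry.register(
    'string',                     # exact-match lookup; also reused as the label
    encoding.TextStringEncoder,
    decoding.StringDecoder,
)
codec = ABICodec(my_registry)     # from .codec import ABICodec
codec.encode(['string'], ['hello'])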
|
||||
|
||||
registry_packed = ABIRegistry()
|
||||
|
||||
registry_packed.register_encoder(
|
||||
BaseEquals("uint"),
|
||||
encoding.PackedUnsignedIntegerEncoder,
|
||||
label="uint",
|
||||
)
|
||||
registry_packed.register_encoder(
|
||||
BaseEquals("int"),
|
||||
encoding.PackedSignedIntegerEncoder,
|
||||
label="int",
|
||||
)
|
||||
registry_packed.register_encoder(
|
||||
BaseEquals("address"),
|
||||
encoding.PackedAddressEncoder,
|
||||
label="address",
|
||||
)
|
||||
registry_packed.register_encoder(
|
||||
BaseEquals("bool"),
|
||||
encoding.PackedBooleanEncoder,
|
||||
label="bool",
|
||||
)
|
||||
registry_packed.register_encoder(
|
||||
BaseEquals("ufixed"),
|
||||
encoding.PackedUnsignedFixedEncoder,
|
||||
label="ufixed",
|
||||
)
|
||||
registry_packed.register_encoder(
|
||||
BaseEquals("fixed"),
|
||||
encoding.PackedSignedFixedEncoder,
|
||||
label="fixed",
|
||||
)
|
||||
registry_packed.register_encoder(
|
||||
BaseEquals("bytes", with_sub=True),
|
||||
encoding.PackedBytesEncoder,
|
||||
label="bytes<M>",
|
||||
)
|
||||
registry_packed.register_encoder(
|
||||
BaseEquals("bytes", with_sub=False),
|
||||
encoding.PackedByteStringEncoder,
|
||||
label="bytes",
|
||||
)
|
||||
registry_packed.register_encoder(
|
||||
BaseEquals("function"),
|
||||
encoding.PackedBytesEncoder,
|
||||
label="function",
|
||||
)
|
||||
registry_packed.register_encoder(
|
||||
BaseEquals("string"),
|
||||
encoding.PackedTextStringEncoder,
|
||||
label="string",
|
||||
)
|
||||
registry_packed.register_encoder(
|
||||
has_arrlist,
|
||||
encoding.PackedArrayEncoder,
|
||||
label="has_arrlist",
|
||||
)
|
||||
registry_packed.register_encoder(
|
||||
is_base_tuple,
|
||||
encoding.TupleEncoder,
|
||||
label="is_base_tuple",
|
||||
)
|
||||
3
ccxt/static_dependencies/ethereum/abi/tools/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from ._strategies import ( # noqa: F401
|
||||
get_abi_strategy,
|
||||
)
|
||||
230
ccxt/static_dependencies/ethereum/abi/tools/_strategies.py
Normal file
@@ -0,0 +1,230 @@
|
||||
from typing import (
|
||||
Callable,
|
||||
Union,
|
||||
)
|
||||
|
||||
from ...typing.abi import (
|
||||
TypeStr,
|
||||
)
|
||||
from ..utils import (
|
||||
to_checksum_address,
|
||||
)
|
||||
from hypothesis import (
|
||||
strategies as st,
|
||||
)
|
||||
|
||||
from ..grammar import (
|
||||
ABIType,
|
||||
normalize,
|
||||
parse,
|
||||
)
|
||||
from ..registry import (
|
||||
BaseEquals,
|
||||
BaseRegistry,
|
||||
Lookup,
|
||||
PredicateMapping,
|
||||
has_arrlist,
|
||||
is_base_tuple,
|
||||
)
|
||||
from ..utils.numeric import (
|
||||
scale_places,
|
||||
)
|
||||
|
||||
StrategyFactory = Callable[[ABIType, "StrategyRegistry"], st.SearchStrategy]
|
||||
StrategyRegistration = Union[st.SearchStrategy, StrategyFactory]
|
||||
|
||||
|
||||
class StrategyRegistry(BaseRegistry):
|
||||
def __init__(self):
|
||||
self._strategies = PredicateMapping("strategy registry")
|
||||
|
||||
def register_strategy(
|
||||
self, lookup: Lookup, registration: StrategyRegistration, label: str = None
|
||||
) -> None:
|
||||
self._register(self._strategies, lookup, registration, label=label)
|
||||
|
||||
def unregister_strategy(self, lookup_or_label: Lookup) -> None:
|
||||
self._unregister(self._strategies, lookup_or_label)
|
||||
|
||||
def get_strategy(self, type_str: TypeStr) -> st.SearchStrategy:
|
||||
"""
|
||||
Returns a hypothesis strategy for the given ABI type.
|
||||
|
||||
:param type_str: The canonical string representation of the ABI type
|
||||
for which a hypothesis strategy should be returned.
|
||||
|
||||
:returns: A hypothesis strategy for generating Python values that are
|
||||
encodable as values of the given ABI type.
|
||||
"""
|
||||
registration = self._get_registration(self._strategies, type_str)
|
||||
|
||||
if isinstance(registration, st.SearchStrategy):
|
||||
# If a hypothesis strategy was registered, just return it
|
||||
return registration
|
||||
else:
|
||||
# Otherwise, assume the factory is a callable. Call it with the abi
|
||||
# type to get an appropriate hypothesis strategy.
|
||||
normalized_type_str = normalize(type_str)
|
||||
abi_type = parse(normalized_type_str)
|
||||
strategy = registration(abi_type, self)
|
||||
|
||||
return strategy
|
||||
|
||||
|
||||
def get_uint_strategy(
|
||||
abi_type: ABIType, registry: StrategyRegistry
|
||||
) -> st.SearchStrategy:
|
||||
bits = abi_type.sub
|
||||
|
||||
return st.integers(
|
||||
min_value=0,
|
||||
max_value=2**bits - 1,
|
||||
)
|
||||
|
||||
|
||||
def get_int_strategy(
|
||||
abi_type: ABIType, registry: StrategyRegistry
|
||||
) -> st.SearchStrategy:
|
||||
bits = abi_type.sub
|
||||
|
||||
return st.integers(
|
||||
min_value=-(2 ** (bits - 1)),
|
||||
max_value=2 ** (bits - 1) - 1,
|
||||
)
|
||||
|
||||
|
||||
address_strategy = st.binary(min_size=20, max_size=20).map(to_checksum_address)
|
||||
bool_strategy = st.booleans()
|
||||
|
||||
|
||||
def get_ufixed_strategy(
|
||||
abi_type: ABIType, registry: StrategyRegistry
|
||||
) -> st.SearchStrategy:
|
||||
bits, places = abi_type.sub
|
||||
|
||||
return st.decimals(
|
||||
min_value=0,
|
||||
max_value=2**bits - 1,
|
||||
places=0,
|
||||
).map(scale_places(places))
|
||||
|
||||
|
||||
def get_fixed_strategy(
|
||||
abi_type: ABIType, registry: StrategyRegistry
|
||||
) -> st.SearchStrategy:
|
||||
bits, places = abi_type.sub
|
||||
|
||||
return st.decimals(
|
||||
min_value=-(2 ** (bits - 1)),
|
||||
max_value=2 ** (bits - 1) - 1,
|
||||
places=0,
|
||||
).map(scale_places(places))
|
||||
|
||||
|
||||
def get_bytes_strategy(
|
||||
abi_type: ABIType, registry: StrategyRegistry
|
||||
) -> st.SearchStrategy:
|
||||
num_bytes = abi_type.sub
|
||||
|
||||
return st.binary(
|
||||
min_size=num_bytes,
|
||||
max_size=num_bytes,
|
||||
)
|
||||
|
||||
|
||||
bytes_strategy = st.binary(min_size=0, max_size=4096)
|
||||
string_strategy = st.text()
|
||||
|
||||
|
||||
def get_array_strategy(
|
||||
abi_type: ABIType, registry: StrategyRegistry
|
||||
) -> st.SearchStrategy:
|
||||
item_type = abi_type.item_type
|
||||
item_type_str = item_type.to_type_str()
|
||||
item_strategy = registry.get_strategy(item_type_str)
|
||||
|
||||
last_dim = abi_type.arrlist[-1]
|
||||
if len(last_dim) == 0:
|
||||
# Is dynamic list. Don't restrict length.
|
||||
return st.lists(item_strategy)
|
||||
else:
|
||||
# Is static list. Restrict length.
|
||||
dim_size = last_dim[0]
|
||||
return st.lists(item_strategy, min_size=dim_size, max_size=dim_size)
|
||||
|
||||
|
||||
def get_tuple_strategy(
|
||||
abi_type: ABIType, registry: StrategyRegistry
|
||||
) -> st.SearchStrategy:
|
||||
component_strategies = [
|
||||
registry.get_strategy(comp_abi_type.to_type_str())
|
||||
for comp_abi_type in abi_type.components
|
||||
]
|
||||
|
||||
return st.tuples(*component_strategies)
|
||||
|
||||
|
||||
strategy_registry = StrategyRegistry()
|
||||
|
||||
strategy_registry.register_strategy(
|
||||
BaseEquals("uint"),
|
||||
get_uint_strategy,
|
||||
label="uint",
|
||||
)
|
||||
strategy_registry.register_strategy(
|
||||
BaseEquals("int"),
|
||||
get_int_strategy,
|
||||
label="int",
|
||||
)
|
||||
strategy_registry.register_strategy(
|
||||
BaseEquals("address", with_sub=False),
|
||||
address_strategy,
|
||||
label="address",
|
||||
)
|
||||
strategy_registry.register_strategy(
|
||||
BaseEquals("bool", with_sub=False),
|
||||
bool_strategy,
|
||||
label="bool",
|
||||
)
|
||||
strategy_registry.register_strategy(
|
||||
BaseEquals("ufixed"),
|
||||
get_ufixed_strategy,
|
||||
label="ufixed",
|
||||
)
|
||||
strategy_registry.register_strategy(
|
||||
BaseEquals("fixed"),
|
||||
get_fixed_strategy,
|
||||
label="fixed",
|
||||
)
|
||||
strategy_registry.register_strategy(
|
||||
BaseEquals("bytes", with_sub=True),
|
||||
get_bytes_strategy,
|
||||
label="bytes<M>",
|
||||
)
|
||||
strategy_registry.register_strategy(
|
||||
BaseEquals("bytes", with_sub=False),
|
||||
bytes_strategy,
|
||||
label="bytes",
|
||||
)
|
||||
strategy_registry.register_strategy(
|
||||
BaseEquals("function", with_sub=False),
|
||||
get_bytes_strategy,
|
||||
label="function",
|
||||
)
|
||||
strategy_registry.register_strategy(
|
||||
BaseEquals("string", with_sub=False),
|
||||
string_strategy,
|
||||
label="string",
|
||||
)
|
||||
strategy_registry.register_strategy(
|
||||
has_arrlist,
|
||||
get_array_strategy,
|
||||
label="has_arrlist",
|
||||
)
|
||||
strategy_registry.register_strategy(
|
||||
is_base_tuple,
|
||||
get_tuple_strategy,
|
||||
label="is_base_tuple",
|
||||
)
|
||||
|
||||
get_abi_strategy = strategy_registry.get_strategy
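Sketch of how the strategy registry is consumed (requires ``hypothesis`` at runtime; the test name is illustrative):

strategy = get_abi_strategy('(uint8,bool[2])')
strategy.example()   # e.g. (17, [True, False])

from hypothesis import given

@given(get_abi_strategy('uint32'))
def test_uint32_values_are_in_range(value):
    assert 0 <= value <= 2**32 - 1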
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
83
ccxt/static_dependencies/ethereum/abi/utils/numeric.py
Normal file
@@ -0,0 +1,83 @@
|
||||
import decimal
|
||||
from typing import (
|
||||
Callable,
|
||||
Tuple,
|
||||
)
|
||||
|
||||
ABI_DECIMAL_PREC = 999
|
||||
|
||||
abi_decimal_context = decimal.Context(prec=ABI_DECIMAL_PREC)
|
||||
|
||||
ZERO = decimal.Decimal(0)
|
||||
TEN = decimal.Decimal(10)
|
||||
|
||||
|
||||
def ceil32(x: int) -> int:
|
||||
return x if x % 32 == 0 else x + 32 - (x % 32)
|
||||
|
||||
|
||||
def compute_unsigned_integer_bounds(num_bits: int) -> Tuple[int, int]:
|
||||
return (
|
||||
0,
|
||||
2**num_bits - 1,
|
||||
)
|
||||
|
||||
|
||||
def compute_signed_integer_bounds(num_bits: int) -> Tuple[int, int]:
|
||||
return (
|
||||
-1 * 2 ** (num_bits - 1),
|
||||
2 ** (num_bits - 1) - 1,
|
||||
)
|
||||
|
||||
|
||||
def compute_unsigned_fixed_bounds(
|
||||
num_bits: int,
|
||||
frac_places: int,
|
||||
) -> Tuple[decimal.Decimal, decimal.Decimal]:
|
||||
int_upper = compute_unsigned_integer_bounds(num_bits)[1]
|
||||
|
||||
with decimal.localcontext(abi_decimal_context):
|
||||
upper = decimal.Decimal(int_upper) * TEN**-frac_places
|
||||
|
||||
return ZERO, upper
|
||||
|
||||
|
||||
def compute_signed_fixed_bounds(
|
||||
num_bits: int,
|
||||
frac_places: int,
|
||||
) -> Tuple[decimal.Decimal, decimal.Decimal]:
|
||||
int_lower, int_upper = compute_signed_integer_bounds(num_bits)
|
||||
|
||||
with decimal.localcontext(abi_decimal_context):
|
||||
exp = TEN**-frac_places
|
||||
lower = decimal.Decimal(int_lower) * exp
|
||||
upper = decimal.Decimal(int_upper) * exp
|
||||
|
||||
return lower, upper
|
||||
|
||||
|
||||
def scale_places(places: int) -> Callable[[decimal.Decimal], decimal.Decimal]:
|
||||
"""
|
||||
Returns a function that shifts the decimal point of decimal values to the
|
||||
right by ``places`` places.
|
||||
"""
|
||||
if not isinstance(places, int):
|
||||
raise ValueError(
|
||||
f"Argument `places` must be int. Got value {places} "
|
||||
f"of type {type(places)}.",
|
||||
)
|
||||
|
||||
with decimal.localcontext(abi_decimal_context):
|
||||
scaling_factor = TEN**-places
|
||||
|
||||
def f(x: decimal.Decimal) -> decimal.Decimal:
|
||||
with decimal.localcontext(abi_decimal_context):
|
||||
return x * scaling_factor
|
||||
|
||||
places_repr = f"Eneg{places}" if places > 0 else f"Epos{-places}"
|
||||
func_name = f"scale_by_{places_repr}"
|
||||
|
||||
f.__name__ = func_name
|
||||
f.__qualname__ = func_name
|
||||
|
||||
return f
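Expected behavior of the helpers above, for orientation:

ceil32(5)                                # 32
compute_signed_integer_bounds(8)         # (-128, 127)
compute_unsigned_fixed_bounds(8, 1)      # (Decimal('0'), Decimal('25.5'))
scale_places(2)(decimal.Decimal('123'))  # Decimal('1.23')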
|
||||
27
ccxt/static_dependencies/ethereum/abi/utils/padding.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from ...utils.toolz import (
|
||||
curry,
|
||||
)
|
||||
|
||||
|
||||
@curry
|
||||
def zpad(value: bytes, length: int) -> bytes:
|
||||
return value.rjust(length, b"\x00")
|
||||
|
||||
|
||||
zpad32 = zpad(length=32)
|
||||
|
||||
|
||||
@curry
|
||||
def zpad_right(value: bytes, length: int) -> bytes:
|
||||
return value.ljust(length, b"\x00")
|
||||
|
||||
|
||||
zpad32_right = zpad_right(length=32)
|
||||
|
||||
|
||||
@curry
|
||||
def fpad(value: bytes, length: int) -> bytes:
|
||||
return value.rjust(length, b"\xff")
|
||||
|
||||
|
||||
fpad32 = fpad(length=32)
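The curried helpers above behave like this:

zpad32(b'\x01')          # 31 zero bytes followed by b'\x01'
zpad32_right(b'\x01')    # b'\x01' followed by 31 zero bytes
fpad32(b'\x80')          # 31 b'\xff' bytes followed by b'\x80'
zpad(b'\xab', length=4)  # b'\x00\x00\x00\xab' -- curry allows partial application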
|
||||
19
ccxt/static_dependencies/ethereum/abi/utils/string.py
Normal file
@@ -0,0 +1,19 @@
|
||||
from typing import (
|
||||
Any,
|
||||
)
|
||||
|
||||
|
||||
def abbr(value: Any, limit: int = 79) -> str:
|
||||
"""
|
||||
Converts a value into its string representation and abbreviates that
|
||||
representation based on the given length `limit` if necessary.
|
||||
"""
|
||||
rep = repr(value)
|
||||
|
||||
if len(rep) > limit:
|
||||
if limit < 3:
|
||||
raise ValueError("Abbreviation limit may not be less than 3")
|
||||
|
||||
rep = rep[: limit - 3] + "..."
|
||||
|
||||
return rep
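For example:

abbr([1, 2, 3])            # '[1, 2, 3]' (short enough, returned unchanged)
abbr('x' * 200, limit=12)  # "'xxxxxxxx..." (repr truncated to 12 characters)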
|
||||