This commit is contained in:
lz_db
2025-11-16 12:31:03 +08:00
commit 0fab423a18
1451 changed files with 743213 additions and 0 deletions

View File

@@ -0,0 +1 @@
// TODO: add web3

View File

@@ -0,0 +1 @@
# Declares the public submodules re-exported by this vendored-packages namespace.
__all__ = ['ecdsa', 'keccak', 'aiohttp_socks', 'ethereum', 'parsimonious', 'toolz', 'starknet', 'marshmallow', 'marshmallow_oneofschema', 'lark', 'starkware', 'sympy']

View File

@@ -0,0 +1,14 @@
from .keys import SigningKey, VerifyingKey, BadSignatureError, BadDigestError
from .curves import NIST192p, NIST224p, NIST256p, NIST384p, NIST521p, SECP256k1
# This code comes from http://github.com/warner/python-ecdsa
#from ._version import get_versions
__version__ = 'ccxt' # custom ccxt version
#del get_versions
__all__ = ["curves", "der", "ecdsa", "ellipticcurve", "keys", "numbertheory",
           "util"]
# Reference the re-exported names once so linters (pyflakes) do not flag the
# imports above as unused; the helper list is deleted again immediately.
_hush_pyflakes = [SigningKey, VerifyingKey, BadSignatureError, BadDigestError,
                  NIST192p, NIST224p, NIST256p, NIST384p, NIST521p, SECP256k1]
del _hush_pyflakes

View File

@@ -0,0 +1,520 @@
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.17 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py grep for the variable names, so each assignment
    # must stay on a line of its own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""
    # Attributes (VCS, style, tag_prefix, parentdir_prefix,
    # versionfile_source, verbose) are assigned in get_config().
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these values were baked in when 'setup.py versioneer' created
    # _version.py
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "python-ecdsa-",
        "parentdir_prefix": "ecdsa-",
        "versionfile_source": "ecdsa/_version.py",
        "verbose": False,
    }
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
    # Used as internal control flow: get_versions() tries each strategy in
    # turn and moves on when one raises NotThisMethod.
# Template registry used by versioneer's setup-time machinery; not referenced
# again in the code visible here.
LONG_VERSION_PY = {}
# vcs name -> {method name -> handler function}; filled in by the
# register_vcs_handler decorator below.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    commands is a list of alternative executable names tried in order
    (e.g. ["git.cmd", "git.exe"] on Windows); args is the shared argument
    list. Returns a (stdout, returncode) pair: (None, None) when no
    executable could be launched, and (None, returncode) when the process
    exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            # sys.exc_info() keeps this compatible with old Python syntax
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # executable not found: try the next candidate name
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for-else: the loop exhausted every candidate without a Popen success
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string. We also support searching
    up two directory levels for an appropriately named parent directory.
    Raises NotThisMethod when no ancestor matches.
    """
    tried = []
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            return {"version": basename[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # walk up one level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged
    build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in the history
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree
    will appear "older" than the corresponding clean one), but you shouldn't
    be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        version = "0.post%d" % pieces["distance"]
    else:
        if not (pieces["distance"] or pieces["dirty"]):
            return tag
        version = tag + ".post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    return version + "-dirty" if pieces["dirty"] else version
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # an earlier stage already failed; surface the error verbatim
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so.

    Tries, in order: expanded git-archive keywords, 'git describe' on a
    checked-out tree, then the parent-directory-name heuristic; falls back
    to an "unable to compute version" dict rather than raising.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # no __file__ on this interpreter: cannot locate the source tree
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}

View File

@@ -0,0 +1,56 @@
from __future__ import division
from . import der, ecdsa
class UnknownCurveError(Exception):
    """Raised by find_curve() when no registered curve matches a given OID."""
    pass
def orderlen(order):
    """Return the number of bytes needed to represent the integer order."""
    hex_digits = len("%x" % order)
    return (hex_digits + 1) // 2
# the NIST curves
class Curve:
    """Named elliptic curve bundling the domain parameters used by this
    package: the underlying field curve, its generator point, the group
    order, derived byte lengths, and the DER-encoded OID."""
    def __init__(self, name, curve, generator, oid, openssl_name=None):
        self.name = name
        self.openssl_name = openssl_name  # maybe None
        self.curve = curve
        self.generator = generator
        self.order = generator.order()
        # bytes needed to hold one scalar modulo the group order
        self.baselen = orderlen(self.order)
        # an uncompressed public key / a signature are both two scalars
        self.verifying_key_length = 2*self.baselen
        self.signature_length = 2*self.baselen
        self.oid = oid
        self.encoded_oid = der.encode_oid(*oid)
# Standard curve objects: the NIST prime curves plus Certicom's secp256k1.
# Each OID tuple identifies the curve in DER key encodings; the trailing
# string is the name OpenSSL uses for the same curve.
NIST192p = Curve("NIST192p", ecdsa.curve_192,
                 ecdsa.generator_192,
                 (1, 2, 840, 10045, 3, 1, 1), "prime192v1")
NIST224p = Curve("NIST224p", ecdsa.curve_224,
                 ecdsa.generator_224,
                 (1, 3, 132, 0, 33), "secp224r1")
NIST256p = Curve("NIST256p", ecdsa.curve_256,
                 ecdsa.generator_256,
                 (1, 2, 840, 10045, 3, 1, 7), "prime256v1")
NIST384p = Curve("NIST384p", ecdsa.curve_384,
                 ecdsa.generator_384,
                 (1, 3, 132, 0, 34), "secp384r1")
NIST521p = Curve("NIST521p", ecdsa.curve_521,
                 ecdsa.generator_521,
                 (1, 3, 132, 0, 35), "secp521r1")
SECP256k1 = Curve("SECP256k1", ecdsa.curve_secp256k1,
                  ecdsa.generator_secp256k1,
                  (1, 3, 132, 0, 10), "secp256k1")
# registry searched by find_curve() below
curves = [NIST192p, NIST224p, NIST256p, NIST384p, NIST521p, SECP256k1]
def find_curve(oid_curve):
    """Return the registered Curve whose OID equals oid_curve.

    Raises UnknownCurveError when no known curve matches.
    """
    match = next((c for c in curves if c.oid == oid_curve), None)
    if match is None:
        raise UnknownCurveError("I don't know about the curve with oid %s."
                                "I only know about these: %s" %
                                (oid_curve, [c.name for c in curves]))
    return match

View File

@@ -0,0 +1,221 @@
from __future__ import division
import binascii
import base64
class UnexpectedDER(Exception):
    """Raised when input bytes do not match the expected DER structure."""
    pass
def encode_constructed(tag, value):
    """Wrap value in a context-specific constructed DER element (0xa0+tag)."""
    header = int.to_bytes(0xa0+tag, 1, 'big') + encode_length(len(value))
    return header + value
def encode_integer(r):
    """DER-encode the non-negative integer r (tag 0x02).

    DER integers are two's complement, so an extra leading 0x00 byte is
    required whenever the top bit of the first content byte is set.
    """
    assert r >= 0  # can't support negative numbers yet
    # bit_length()//8 + 1 bytes automatically yields that extra leading
    # 0x00 exactly when the most significant byte would be >= 0x80
    body = r.to_bytes(r.bit_length() // 8 + 1, 'big')
    return b'\x02' + int.to_bytes(len(body), 1, 'big') + body
def encode_bitstring(s):
    """Prefix s with a DER BIT STRING tag (0x03) and length."""
    return b''.join((b'\x03', encode_length(len(s)), s))
def encode_octet_string(s):
    """Prefix s with a DER OCTET STRING tag (0x04) and length."""
    return b''.join((b'\x04', encode_length(len(s)), s))
def encode_oid(first, second, *pieces):
    """DER-encode an OBJECT IDENTIFIER (tag 0x06) from its integer arcs.

    The first two arcs are packed into one byte as 40*first + second per
    X.690; remaining arcs are base-128 encoded.
    """
    assert first <= 2
    assert second <= 39
    body = int.to_bytes(40*first+second, 1, 'big')
    for piece in pieces:
        body += encode_number(piece)
    return b'\x06' + encode_length(len(body)) + body
def encode_sequence(*encoded_pieces):
    """DER-encode a SEQUENCE (tag 0x30) from already-encoded elements."""
    body = b''.join(encoded_pieces)
    return b'\x30' + encode_length(len(body)) + body
def encode_number(n):
    """Encode the non-negative integer n in base-128, big-endian, with the
    continuation bit (0x80) set on every byte except the last."""
    if n == 0:
        return b'\x00'
    out = bytearray()
    while n:
        out.append((n & 0x7f) | 0x80)
        n >>= 7
    out[0] &= 0x7f  # least significant group carries no continuation bit
    out.reverse()
    return bytes(out)
def remove_constructed(string):
    """Split a constructed DER element (tag 0xa0-0xbf) into (tag, body, rest)."""
    first = string[0] if isinstance(string[0], int) else ord(string[0])
    if (first & 0xe0) != 0xa0:
        raise UnexpectedDER("wanted constructed tag (0xa0-0xbf), got 0x%02x"
                            % first)
    length, llen = read_length(string[1:])
    start = 1 + llen
    end = start + length
    return first & 0x1f, string[start:end], string[end:]
def remove_sequence(string):
    """Strip a DER SEQUENCE header and return (contents, rest)."""
    if not string.startswith(b'\x30'):
        first = string[0] if isinstance(string[0], int) else ord(string[0])
        raise UnexpectedDER("wanted sequence (0x30), got 0x%02x" % first)
    length, llen = read_length(string[1:])
    end = 1 + llen + length
    return string[1 + llen:end], string[end:]
def remove_octet_string(string):
    """Strip a DER OCTET STRING header and return (contents, rest)."""
    if not string.startswith(b'\x04'):
        first = string[0] if isinstance(string[0], int) else ord(string[0])
        raise UnexpectedDER("wanted octetstring (0x04), got 0x%02x" % first)
    length, llen = read_length(string[1:])
    end = 1 + llen + length
    return string[1 + llen:end], string[end:]
def remove_object(string):
    """Parse a DER OBJECT IDENTIFIER and return (arcs, rest).

    arcs is a tuple of integers; rest is the unconsumed remainder.
    Raises UnexpectedDER when the leading tag byte is not 0x06.
    """
    if not string.startswith(b'\x06'):
        n = string[0] if isinstance(string[0], int) else ord(string[0])
        raise UnexpectedDER("wanted object (0x06), got 0x%02x" % n)
    length, lengthlength = read_length(string[1:])
    body = string[1+lengthlength:1+lengthlength+length]
    rest = string[1+lengthlength+length:]
    # each subsequent arc is base-128 encoded
    numbers = []
    while body:
        n, ll = read_number(body)
        numbers.append(n)
        body = body[ll:]
    # per X.690 the first content byte packs the first two arcs as
    # 40*first + second; unpack it back into two separate arcs
    n0 = numbers.pop(0)
    first = n0//40
    second = n0-(40*first)
    numbers.insert(0, first)
    numbers.insert(1, second)
    return tuple(numbers), rest
def remove_integer(string):
    """Parse a DER INTEGER and return (value, rest).

    Only non-negative values are supported (asserts the sign bit is clear).
    """
    if not string.startswith(b'\x02'):
        first = string[0] if isinstance(string[0], int) else ord(string[0])
        raise UnexpectedDER("wanted integer (0x02), got 0x%02x" % first)
    length, llen = read_length(string[1:])
    body = string[1 + llen:1 + llen + length]
    rest = string[1 + llen + length:]
    msb = body[0] if isinstance(body[0], int) else ord(body[0])
    assert msb < 0x80  # can't support negative numbers yet
    return int(binascii.hexlify(body), 16), rest
def read_number(string):
    """Decode one base-128 OID arc from the front of string.

    Returns (number, consumed) where consumed is the count of bytes read.
    Each byte carries 7 payload bits; bit 7 set means more bytes follow.
    Raises UnexpectedDER when the input ends before the final byte.
    """
    number = 0
    llen = 0
    # base-128 big endian, with b7 set in all but the last byte
    while True:
        # bug fix: the original tested `llen > len(string)`, so truncated
        # input raised IndexError at string[llen] instead of UnexpectedDER
        if llen >= len(string):
            raise UnexpectedDER("ran out of length bytes")
        number = number << 7
        d = string[llen] if isinstance(string[llen], int) else ord(string[llen])
        number += (d & 0x7f)
        llen += 1
        if not d & 0x80:
            break
    return number, llen
def encode_length(l):
    """DER-encode a length: one byte below 0x80, otherwise the long form
    (0x80 | byte-count followed by the big-endian length bytes)."""
    assert l >= 0
    if l < 0x80:
        return bytes([l])
    size = (l.bit_length() + 7) // 8
    return bytes([0x80 | size]) + l.to_bytes(size, 'big')
def read_length(string):
    """Decode a DER length from the front of string.

    Returns (length, consumed). Short form is a single byte below 0x80;
    long form is 0x80 | count followed by count big-endian bytes.
    Raises UnexpectedDER when the long-form bytes are truncated.
    """
    first = string[0] if isinstance(string[0], int) else ord(string[0])
    if not (first & 0x80):
        # short form
        return first & 0x7f, 1
    # long form: low 7 bits give the number of additional base-256 bytes
    count = first & 0x7f
    if count > len(string) - 1:
        raise UnexpectedDER("ran out of length bytes")
    return int(binascii.hexlify(string[1:1 + count]), 16), 1 + count
def remove_bitstring(string):
    """Strip a DER BIT STRING header and return (contents, rest)."""
    first = string[0] if isinstance(string[0], int) else ord(string[0])
    if not string.startswith(b'\x03'):
        raise UnexpectedDER("wanted bitstring (0x03), got 0x%02x" % first)
    length, llen = read_length(string[1:])
    end = 1 + llen + length
    return string[1 + llen:end], string[end:]
# SEQUENCE([1, STRING(secexp), cont[0], OBJECT(curvename), cont[1], BITSTRING])
# signatures: (from RFC3279)
# ansi-X9-62 OBJECT IDENTIFIER ::= {
# iso(1) member-body(2) us(840) 10045 }
#
# id-ecSigType OBJECT IDENTIFIER ::= {
# ansi-X9-62 signatures(4) }
# ecdsa-with-SHA1 OBJECT IDENTIFIER ::= {
# id-ecSigType 1 }
## so 1,2,840,10045,4,1
## so 0x42, .. ..
# Ecdsa-Sig-Value ::= SEQUENCE {
# r INTEGER,
# s INTEGER }
# id-public-key-type OBJECT IDENTIFIER ::= { ansi-X9.62 2 }
#
# id-ecPublicKey OBJECT IDENTIFIER ::= { id-publicKeyType 1 }
# I think the secp224r1 identifier is (t=06,l=05,v=2b81040021)
# secp224r1 OBJECT IDENTIFIER ::= {
# iso(1) identified-organization(3) certicom(132) curve(0) 33 }
# and the secp384r1 is (t=06,l=05,v=2b81040022)
# secp384r1 OBJECT IDENTIFIER ::= {
# iso(1) identified-organization(3) certicom(132) curve(0) 34 }
def unpem(pem):
    """Decode a PEM blob (str or bytes) into raw DER bytes by stripping the
    -----BEGIN/END----- armor lines and base64-decoding the body."""
    if isinstance(pem, str):
        pem = pem.encode()
    body = b''.join(line.strip() for line in pem.split(b'\n')
                    if line and not line.startswith(b'-----'))
    return base64.b64decode(body)
def topem(der, name):
    """Wrap DER bytes in PEM armor labelled with name, base64 body wrapped
    at 64 characters per line."""
    b64 = base64.b64encode(der)
    pieces = [("-----BEGIN %s-----\n" % name).encode()]
    pieces += [b64[i:i + 64] + b'\n' for i in range(0, len(b64), 64)]
    pieces.append(("-----END %s-----\n" % name).encode())
    return b''.join(pieces)

View File

@@ -0,0 +1,310 @@
#! /usr/bin/env python
"""
Implementation of Elliptic-Curve Digital Signatures.
Classes and methods for elliptic-curve signatures:
private keys, public keys, signatures,
NIST prime-modulus curves with modulus lengths of
192, 224, 256, 384, and 521 bits.
Example:
# (In real-life applications, you would probably want to
# protect against defects in SystemRandom.)
from random import SystemRandom
randrange = SystemRandom().randrange
# Generate a public/private key pair using the NIST Curve P-192:
g = generator_192
n = g.order()
secret = randrange( 1, n )
pubkey = Public_key( g, g * secret )
privkey = Private_key( pubkey, secret )
# Signing a hash value:
hash = randrange( 1, n )
signature = privkey.sign( hash, randrange( 1, n ) )
# Verifying a signature for a hash value:
if pubkey.verifies( hash, signature ):
print_("Demo verification succeeded.")
else:
print_("*** Demo verification failed.")
# Verification fails if the hash value is modified:
if pubkey.verifies( hash-1, signature ):
print_("**** Demo verification failed to reject tampered hash.")
else:
print_("Demo verification correctly rejected tampered hash.")
Version of 2009.05.16.
Revision history:
2005.12.31 - Initial version.
2008.11.25 - Substantial revisions introducing new classes.
2009.05.16 - Warn against using random.randrange in real applications.
2009.05.17 - Use random.SystemRandom by default.
Written in 2005 by Peter Pearson and placed in the public domain.
"""
from . import ellipticcurve
from . import numbertheory
class RSZeroError(RuntimeError):
    """Raised when a signature component (r or s) comes out zero; the
    caller should retry signing with a fresh random nonce."""
    pass
class Signature(object):
    """ECDSA signature.
    """
    def __init__(self, r, s, recovery_param):
        # r, s: the two integer components of the signature
        # recovery_param: small integer hint used for public-key recovery
        self.r = r
        self.s = s
        self.recovery_param = recovery_param

    def recover_public_keys(self, hash, generator):
        """Returns two public keys for which the signature is valid

        hash is signed hash
        generator is the used generator of the signature
        """
        curve = generator.curve()
        n = generator.order()
        r = self.r
        s = self.s
        e = hash
        x = r
        # Compute the curve point with x as x-coordinate
        alpha = (pow(x, 3, curve.p()) + (curve.a() * x) + curve.b()) % curve.p()
        # NOTE(review): square_root_mod_prime's behavior when alpha is a
        # non-residue is not visible here -- confirm it raises cleanly.
        beta = numbertheory.square_root_mod_prime(alpha, curve.p())
        # pick the even root; the odd one gives the second candidate below
        y = beta if beta % 2 == 0 else curve.p() - beta
        # Compute the public key: Q = r^-1 * (s*R - e*G)
        R1 = ellipticcurve.Point(curve, x, y, n)
        Q1 = numbertheory.inverse_mod(r, n) * (s * R1 + (-e % n) * generator)
        Pk1 = Public_key(generator, Q1)
        # And the second solution, using the other square root of alpha
        R2 = ellipticcurve.Point(curve, x, -y, n)
        Q2 = numbertheory.inverse_mod(r, n) * (s * R2 + (-e % n) * generator)
        Pk2 = Public_key(generator, Q2)
        return [Pk1, Pk2]
class Public_key(object):
    """Public key for ECDSA.
    """
    def __init__(self, generator, point):
        """generator is the Point that generates the group,
        point is the Point that defines the public key.

        Raises RuntimeError when the point fails the validation checks.
        """
        self.curve = generator.curve()
        self.generator = generator
        self.point = point
        n = generator.order()
        if not n:
            raise RuntimeError("Generator point must have order.")
        # n*Q must be the point at infinity for a valid public key
        if not n * point == ellipticcurve.INFINITY:
            raise RuntimeError("Generator point order is bad.")
        # NOTE(review): X9.62 bounds the coordinates by the field prime p,
        # but this compares them against the group order n -- confirm intended.
        if point.x() < 0 or n <= point.x() or point.y() < 0 or n <= point.y():
            raise RuntimeError("Generator point has x or y out of range.")

    def verifies(self, hash, signature):
        """Verify that signature is a valid signature of hash.
        Return True if the signature is valid.
        """
        # From X9.62 J.3.1.
        G = self.generator
        n = G.order()
        r = signature.r
        s = signature.s
        # both components must lie in [1, n-1]
        if r < 1 or r > n - 1:
            return False
        if s < 1 or s > n - 1:
            return False
        c = numbertheory.inverse_mod(s, n)
        u1 = (hash * c) % n
        u2 = (r * c) % n
        # valid iff the x-coordinate of u1*G + u2*Q equals r modulo n
        xy = u1 * G + u2 * self.point
        v = xy.x() % n
        return v == r
class Private_key(object):
    """Private key for ECDSA.
    """
    def __init__(self, public_key, secret_multiplier):
        """public_key is of class Public_key;
        secret_multiplier is a large integer.
        """
        self.public_key = public_key
        self.secret_multiplier = secret_multiplier

    def sign(self, hash, random_k):
        """Return a signature for the provided hash, using the provided
        random nonce.  It is absolutely vital that random_k be an unpredictable
        number in the range [1, self.public_key.point.order()-1].  If
        an attacker can guess random_k, he can compute our private key from a
        single signature.  Also, if an attacker knows a few high-order
        bits (or a few low-order bits) of random_k, he can compute our private
        key from many signatures.  The generation of nonces with adequate
        cryptographic strength is very difficult and far beyond the scope
        of this comment.

        May raise RuntimeError, in which case retrying with a new
        random value k is in order.
        """
        G = self.public_key.generator
        n = G.order()
        k = random_k % n
        p1 = k * G
        r = p1.x() % n
        if r == 0:
            raise RSZeroError("amazingly unlucky random number r")
        s = (numbertheory.inverse_mod(k, n) *
             (hash + (self.secret_multiplier * r) % n)) % n
        if s == 0:
            raise RSZeroError("amazingly unlucky random number s")
        # NOTE(review): standard recovery ids use y-parity plus 2 when r
        # wrapped past n; the `p1.x() == k` comparison here looks suspicious
        # (x-coordinate vs nonce) -- confirm against the consumer of
        # recovery_param before changing.
        recovery_param = p1.y() % 2 or (2 if p1.x() == k else 0)
        return Signature(r, s, recovery_param)
def int_to_string(x):
    """Convert integer x into a big-endian string of bytes, as per X9.62."""
    assert x >= 0
    if x == 0:
        return b'\0'
    return x.to_bytes((x.bit_length() + 7) // 8, 'big')
def string_to_int(s):
    """Convert a big-endian string of bytes into an integer, as per X9.62."""
    acc = 0
    for ch in s:
        acc = (acc << 8) + (ch if isinstance(ch, int) else ord(ch))
    return acc
def digest_integer(m):
    """Convert an integer into a string of bytes, compute its SHA-1 hash,
    and convert the result back to an integer.

    Mostly useful for reproducing the ECDSAVS examples; not expected to be
    used much in practice.
    """
    from hashlib import sha1
    digest = sha1(int_to_string(m)).digest()
    return string_to_int(digest)
def point_is_valid(generator, x, y):
    """Is (x,y) a valid public key based on the specified generator?"""
    # These are the tests specified in X9.62.
    n = generator.order()
    curve = generator.curve()
    # NOTE(review): X9.62 bounds coordinates by the field prime, while this
    # compares against the group order n -- confirm this is intended.
    if x < 0 or n <= x or y < 0 or n <= y:
        return False
    if not curve.contains_point(x, y):
        return False
    # n*P must be the point at infinity for a point in the prime-order group
    if not n * ellipticcurve.Point(curve, x, y) == ellipticcurve.INFINITY:
        return False
    return True
# Domain parameters for the standard curves. Conventions: _p is the field
# prime, _r the group order, _b the curve coefficient (a = -3 for the NIST
# prime curves), and (_Gx, _Gy) the base point. The commented s/c values are
# the seed constants from the NIST specifications.
# NIST Curve P-192:
_p = 6277101735386680763835789423207666416083908700390324961279
_r = 6277101735386680763835789423176059013767194773182842284081
# s = 0x3045ae6fc8422f64ed579528d38120eae12196d5L
# c = 0x3099d2bbbfcb2538542dcd5fb078b6ef5f3d6fe2c745de65L
_b = 0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1
_Gx = 0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012
_Gy = 0x07192b95ffc8da78631011ed6b24cdd573f977a11e794811
curve_192 = ellipticcurve.CurveFp(_p, -3, _b)
generator_192 = ellipticcurve.Point(curve_192, _Gx, _Gy, _r)
# NIST Curve P-224:
_p = 26959946667150639794667015087019630673557916260026308143510066298881
_r = 26959946667150639794667015087019625940457807714424391721682722368061
# s = 0xbd71344799d5c7fcdc45b59fa3b9ab8f6a948bc5L
# c = 0x5b056c7e11dd68f40469ee7f3c7a7d74f7d121116506d031218291fbL
_b = 0xb4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4
_Gx = 0xb70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21
_Gy = 0xbd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34
curve_224 = ellipticcurve.CurveFp(_p, -3, _b)
generator_224 = ellipticcurve.Point(curve_224, _Gx, _Gy, _r)
# NIST Curve P-256:
_p = 115792089210356248762697446949407573530086143415290314195533631308867097853951
_r = 115792089210356248762697446949407573529996955224135760342422259061068512044369
# s = 0xc49d360886e704936a6678e1139d26b7819f7e90L
# c = 0x7efba1662985be9403cb055c75d4f7e0ce8d84a9c5114abcaf3177680104fa0dL
_b = 0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b
_Gx = 0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296
_Gy = 0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5
curve_256 = ellipticcurve.CurveFp(_p, -3, _b)
generator_256 = ellipticcurve.Point(curve_256, _Gx, _Gy, _r)
# NIST Curve P-384:
_p = 39402006196394479212279040100143613805079739270465446667948293404245721771496870329047266088258938001861606973112319
_r = 39402006196394479212279040100143613805079739270465446667946905279627659399113263569398956308152294913554433653942643
# s = 0xa335926aa319a27a1d00896a6773a4827acdac73L
# c = 0x79d1e655f868f02fff48dcdee14151ddb80643c1406d0ca10dfe6fc52009540a495e8042ea5f744f6e184667cc722483L
_b = 0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef
_Gx = 0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7
_Gy = 0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f
curve_384 = ellipticcurve.CurveFp(_p, -3, _b)
generator_384 = ellipticcurve.Point(curve_384, _Gx, _Gy, _r)
# NIST Curve P-521:
_p = 6864797660130609714981900799081393217269435300143305409394463459185543183397656052122559640661454554977296311391480858037121987999716643812574028291115057151
_r = 6864797660130609714981900799081393217269435300143305409394463459185543183397655394245057746333217197532963996371363321113864768612440380340372808892707005449
# s = 0xd09e8800291cb85396cc6717393284aaa0da64baL
# c = 0x0b48bfa5f420a34949539d2bdfc264eeeeb077688e44fbf0ad8f6d0edb37bd6b533281000518e19f1b9ffbe0fe9ed8a3c2200b8f875e523868c70c1e5bf55bad637L
_b = 0x051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00
_Gx = 0xc6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66
_Gy = 0x11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650
curve_521 = ellipticcurve.CurveFp(_p, -3, _b)
generator_521 = ellipticcurve.Point(curve_521, _Gx, _Gy, _r)
# Certicom secp256-k1 (the Bitcoin/Ethereum curve): a = 0, b = 7
_a = 0x0000000000000000000000000000000000000000000000000000000000000000
_b = 0x0000000000000000000000000000000000000000000000000000000000000007
_p = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f
_Gx = 0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798
_Gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8
_r = 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141
curve_secp256k1 = ellipticcurve.CurveFp(_p, _a, _b)
generator_secp256k1 = ellipticcurve.Point(curve_secp256k1, _Gx, _Gy, _r)

View File

@@ -0,0 +1,197 @@
#! /usr/bin/env python
#
# Implementation of elliptic curves, for cryptographic applications.
#
# This module doesn't provide any way to choose a random elliptic
# curve, nor to verify that an elliptic curve was chosen randomly,
# because one can simply use NIST's standard curves.
#
# Notes from X9.62-1998 (draft):
# Nomenclature:
# - Q is a public key.
# The "Elliptic Curve Domain Parameters" include:
# - q is the "field size", which in our case equals p.
# - p is a big prime.
# - G is a point of prime order (5.1.1.1).
# - n is the order of G (5.1.1.1).
# Public-key validation (5.2.2):
# - Verify that Q is not the point at infinity.
# - Verify that X_Q and Y_Q are in [0,p-1].
# - Verify that Q is on the curve.
# - Verify that nQ is the point at infinity.
# Signature generation (5.3):
# - Pick random k from [1,n-1].
# Signature checking (5.4.2):
# - Verify that r and s are in [1,n-1].
#
# Version of 2008.11.25.
#
# Revision history:
# 2005.12.31 - Initial version.
# 2008.11.25 - Change CurveFp.is_on to contains_point.
#
# Written in 2005 by Peter Pearson and placed in the public domain.
from __future__ import division
from . import numbertheory
class CurveFp(object):
    """A short-Weierstrass elliptic curve y^2 = x^3 + a*x + b over GF(p)."""

    def __init__(self, p, a, b):
        """Create the curve with prime modulus *p* and coefficients *a*, *b*."""
        self._p = p
        self._a = a
        self._b = b

    def p(self):
        """Return the prime modulus of the underlying field."""
        return self._p

    def a(self):
        """Return the curve coefficient a."""
        return self._a

    def b(self):
        """Return the curve coefficient b."""
        return self._b

    def contains_point(self, x, y):
        """Return True if (x, y) satisfies the curve equation mod p."""
        lhs = y * y
        rhs = x * x * x + self._a * x + self._b
        return (lhs - rhs) % self._p == 0

    def __str__(self):
        return "CurveFp(p=%d, a=%d, b=%d)" % (self._p, self._a, self._b)
class Point(object):
    """A point on an elliptic curve. Altering x and y is forbidden,
    but they can be read by the x() and y() methods."""
    def __init__(self, curve, x, y, order=None):
        """curve, x, y, order; order (optional) is the order of this point."""
        self.__curve = curve
        self.__x = x
        self.__y = y
        self.__order = order
        # self.curve is allowed to be None only for INFINITY:
        if self.__curve:
            assert self.__curve.contains_point(x, y)
        if order:
            # Sanity check: multiplying by the claimed order must yield the
            # group identity (the point at infinity).
            assert self * order == INFINITY
    def __eq__(self, other):
        """Return True if the points are identical, False otherwise."""
        # NOTE(review): assumes *other* is a Point (or INFINITY); comparing
        # against an unrelated type raises AttributeError instead of
        # returning NotImplemented.
        if self.__curve == other.__curve \
           and self.__x == other.__x \
           and self.__y == other.__y:
            return True
        else:
            return False
    def __add__(self, other):
        """Add one point to another point."""
        # X9.62 B.3:
        if other == INFINITY:
            return self
        if self == INFINITY:
            return other
        assert self.__curve == other.__curve
        if self.__x == other.__x:
            if (self.__y + other.__y) % self.__curve.p() == 0:
                # Opposite y-coordinates: P + (-P) is the point at infinity.
                return INFINITY
            else:
                # Same point: use the tangent-line doubling formula.
                return self.double()
        # Chord rule: l is the slope of the line through the two points.
        p = self.__curve.p()
        l = ((other.__y - self.__y) * \
             numbertheory.inverse_mod(other.__x - self.__x, p)) % p
        x3 = (l * l - self.__x - other.__x) % p
        y3 = (l * (self.__x - x3) - self.__y) % p
        return Point(self.__curve, x3, y3)
    def __mul__(self, other):
        """Multiply a point by an integer."""
        def leftmost_bit(x):
            # Value of x's most significant bit (largest power of 2 <= x).
            assert x > 0
            result = 1
            while result <= x:
                result = 2 * result
            return result // 2
        e = other
        if self.__order:
            # The scalar can be reduced modulo the point's order.
            e = e % self.__order
        if e == 0:
            return INFINITY
        if self == INFINITY:
            return INFINITY
        assert e > 0
        # From X9.62 D.3.2: compare the bits of 3*e and e; where they differ,
        # add or subtract the base point (a signed-digit double-and-add).
        e3 = 3 * e
        negative_self = Point(self.__curve, self.__x, -self.__y, self.__order)
        i = leftmost_bit(e3) // 2
        result = self
        # print_("Multiplying %s by %d (e3 = %d):" % (self, other, e3))
        while i > 1:
            result = result.double()
            if (e3 & i) != 0 and (e & i) == 0:
                result = result + self
            if (e3 & i) == 0 and (e & i) != 0:
                result = result + negative_self
            # print_(". . . i = %d, result = %s" % ( i, result ))
            i = i // 2
        return result
    def __rmul__(self, other):
        """Multiply a point by an integer."""
        return self * other
    def __str__(self):
        if self == INFINITY:
            return "infinity"
        return "(%d,%d)" % (self.__x, self.__y)
    def double(self):
        """Return a new point that is twice the old."""
        if self == INFINITY:
            return INFINITY
        # X9.62 B.3: tangent-line slope at this point.
        p = self.__curve.p()
        a = self.__curve.a()
        l = ((3 * self.__x * self.__x + a) * \
             numbertheory.inverse_mod(2 * self.__y, p)) % p
        x3 = (l * l - 2 * self.__x) % p
        y3 = (l * (self.__x - x3) - self.__y) % p
        return Point(self.__curve, x3, y3)
    def x(self):
        """Return the affine x-coordinate."""
        return self.__x
    def y(self):
        """Return the affine y-coordinate."""
        return self.__y
    def curve(self):
        """Return the CurveFp this point lies on (None for INFINITY)."""
        return self.__curve
    def order(self):
        """Return the order supplied at construction time, or None."""
        return self.__order
# This one point is the Point At Infinity for all purposes:
INFINITY = Point(None, None, None)

View File

@@ -0,0 +1,332 @@
import binascii
from . import ecdsa
from . import der
from . import rfc6979
from .curves import NIST192p, find_curve
from .ecdsa import RSZeroError
from .util import string_to_number, number_to_string, randrange
from .util import sigencode_string, sigdecode_string
from .util import oid_ecPublicKey, encoded_oid_ecPublicKey
from hashlib import sha1
class BadSignatureError(Exception):
    """Raised when a signature fails verification."""
class BadDigestError(Exception):
    """Raised when a digest is unsuitable for the curve (e.g. too long)."""
class VerifyingKey:
    """An ECDSA public (verifying) key.

    Wraps an ecdsa.Public_key together with its curve and a default hash
    function. Construct instances through the from_* classmethods; calling
    the constructor directly raises TypeError.
    """
    def __init__(self, _error__please_use_generate=None):
        if not _error__please_use_generate:
            raise TypeError("Please use VerifyingKey.generate() to "
                            "construct me")

    @classmethod
    def from_public_point(klass, point, curve=NIST192p, hashfunc=sha1):
        """Build a VerifyingKey from an ellipticcurve.Point on *curve*."""
        self = klass(_error__please_use_generate=True)
        self.curve = curve
        self.default_hashfunc = hashfunc
        self.pubkey = ecdsa.Public_key(curve.generator, point)
        self.pubkey.order = curve.order
        return self

    @classmethod
    def from_string(klass, string, curve=NIST192p, hashfunc=sha1,
                    validate_point=True):
        """Deserialize from the raw x||y byte string produced by to_string().

        The curve is not part of the encoding and must be supplied.
        """
        order = curve.order
        assert (len(string) == curve.verifying_key_length), \
            (len(string), curve.verifying_key_length)
        xs = string[:curve.baselen]
        ys = string[curve.baselen:]
        assert len(xs) == curve.baselen, (len(xs), curve.baselen)
        assert len(ys) == curve.baselen, (len(ys), curve.baselen)
        x = string_to_number(xs)
        y = string_to_number(ys)
        if validate_point:
            # Reject coordinates that do not lie on the curve.
            assert ecdsa.point_is_valid(curve.generator, x, y)
        from . import ellipticcurve
        point = ellipticcurve.Point(curve.curve, x, y, order)
        return klass.from_public_point(point, curve, hashfunc)

    @classmethod
    def from_pem(klass, string):
        """Deserialize from a PEM-armored DER SubjectPublicKeyInfo."""
        return klass.from_der(der.unpem(string))

    @classmethod
    def from_der(klass, string):
        """Deserialize from DER:
        [[oid_ecPublicKey, oid_curve], point_str_bitstring]."""
        s1, empty = der.remove_sequence(string)
        if empty != b'':
            raise der.UnexpectedDER("trailing junk after DER pubkey: %s" %
                                    binascii.hexlify(empty))
        s2, point_str_bitstring = der.remove_sequence(s1)
        # s2 = oid_ecPublicKey,oid_curve
        oid_pk, rest = der.remove_object(s2)
        oid_curve, empty = der.remove_object(rest)
        if empty != b'':
            raise der.UnexpectedDER("trailing junk after DER pubkey objects: %s" %
                                    binascii.hexlify(empty))
        assert oid_pk == oid_ecPublicKey, (oid_pk, oid_ecPublicKey)
        curve = find_curve(oid_curve)
        point_str, empty = der.remove_bitstring(point_str_bitstring)
        if empty != b'':
            raise der.UnexpectedDER("trailing junk after pubkey pointstring: %s" %
                                    binascii.hexlify(empty))
        # Only uncompressed points are supported: a zero unused-bits octet
        # followed by the 0x04 uncompressed-point marker.
        assert point_str.startswith(b'\x00\x04')
        return klass.from_string(point_str[2:], curve)

    @classmethod
    def from_public_key_recovery(klass, signature, data, curve, hashfunc=sha1, sigdecode=sigdecode_string):
        """Return the list of verifying keys recoverable from *signature*
        over *data* (which is hashed with *hashfunc* first)."""
        digest = hashfunc(data).digest()
        # Fixed: forward the caller's hashfunc (previously hard-coded to
        # sha1, so the recovered keys ignored the hash function requested
        # here and got the wrong default_hashfunc).
        return klass.from_public_key_recovery_with_digest(
            signature, digest, curve, hashfunc=hashfunc, sigdecode=sigdecode)

    @classmethod
    def from_public_key_recovery_with_digest(klass, signature, digest, curve, hashfunc=sha1, sigdecode=sigdecode_string):
        """Return the list of verifying keys recoverable from *signature*
        over the precomputed *digest*."""
        generator = curve.generator
        r, s = sigdecode(signature, generator.order())
        sig = ecdsa.Signature(r, s)
        digest_as_number = string_to_number(digest)
        pks = sig.recover_public_keys(digest_as_number, generator)
        # Transform each ecdsa.Public_key into a VerifyingKey.
        return [klass.from_public_point(pk.point, curve, hashfunc)
                for pk in pks]

    def to_string(self):
        """Serialize to the raw x||y byte string.

        VerifyingKey.from_string(vk.to_string()) == vk as long as the curves
        are the same: the curve itself is not included in the serialized form.
        """
        order = self.pubkey.order
        x_str = number_to_string(self.pubkey.point.x(), order)
        y_str = number_to_string(self.pubkey.point.y(), order)
        return x_str + y_str

    def to_pem(self):
        """Serialize to a PEM block with the "PUBLIC KEY" label."""
        return der.topem(self.to_der(), "PUBLIC KEY")

    def to_der(self):
        """Serialize to DER SubjectPublicKeyInfo (uncompressed point)."""
        order = self.pubkey.order
        x_str = number_to_string(self.pubkey.point.x(), order)
        y_str = number_to_string(self.pubkey.point.y(), order)
        point_str = b'\x00\x04' + x_str + y_str
        return der.encode_sequence(der.encode_sequence(encoded_oid_ecPublicKey,
                                                       self.curve.encoded_oid),
                                   der.encode_bitstring(point_str))

    def verify(self, signature, data, hashfunc=None, sigdecode=sigdecode_string):
        """Verify *signature* over *data* (hashed first).

        Returns True on success; raises BadSignatureError otherwise.
        """
        hashfunc = hashfunc or self.default_hashfunc
        digest = hashfunc(data).digest()
        return self.verify_digest(signature, digest, sigdecode)

    def verify_digest(self, signature, digest, sigdecode=sigdecode_string):
        """Verify *signature* over a precomputed *digest*.

        Returns True on success; raises BadDigestError if the digest is
        longer than the curve allows, BadSignatureError on failure.
        """
        if len(digest) > self.curve.baselen:
            raise BadDigestError("this curve (%s) is too short "
                                 "for your digest (%d)" % (self.curve.name,
                                                           8 * len(digest)))
        number = string_to_number(digest)
        r, s = sigdecode(signature, self.pubkey.order)
        sig = ecdsa.Signature(r, s)
        if self.pubkey.verifies(number, sig):
            return True
        raise BadSignatureError
class SigningKey:
    """An ECDSA private (signing) key.

    Wraps an ecdsa.Private_key plus its curve, default hash function and the
    matching VerifyingKey. Construct via generate() or the from_* methods.
    """
    def __init__(self, _error__please_use_generate=None):
        if not _error__please_use_generate:
            raise TypeError("Please use SigningKey.generate() to construct me")
    @classmethod
    def generate(klass, curve=NIST192p, entropy=None, hashfunc=sha1):
        """Create a new key with a random secret exponent on *curve*."""
        secexp = randrange(curve.order, entropy)
        return klass.from_secret_exponent(secexp, curve, hashfunc)
    # to create a signing key from a short (arbitrary-length) seed, convert
    # that seed into an integer with something like
    # secexp=util.randrange_from_seed__X(seed, curve.order), and then pass
    # that integer into SigningKey.from_secret_exponent(secexp, curve)
    @classmethod
    def from_secret_exponent(klass, secexp, curve=NIST192p, hashfunc=sha1):
        """Build a key from the integer secret exponent 1 <= secexp < order."""
        self = klass(_error__please_use_generate=True)
        self.curve = curve
        self.default_hashfunc = hashfunc
        self.baselen = curve.baselen
        n = curve.order
        assert 1 <= secexp < n
        # The corresponding public point is secexp * G.
        pubkey_point = curve.generator * secexp
        pubkey = ecdsa.Public_key(curve.generator, pubkey_point)
        pubkey.order = n
        self.verifying_key = VerifyingKey.from_public_point(pubkey_point, curve,
                                                            hashfunc)
        self.privkey = ecdsa.Private_key(pubkey, secexp)
        self.privkey.order = n
        return self
    @classmethod
    def from_string(klass, string, curve=NIST192p, hashfunc=sha1):
        """Deserialize from the fixed-length big-endian secret exponent."""
        assert len(string) == curve.baselen, (len(string), curve.baselen)
        secexp = string_to_number(string)
        return klass.from_secret_exponent(secexp, curve, hashfunc)
    @classmethod
    def from_pem(klass, string, hashfunc=sha1):
        # the privkey pem file has two sections: "EC PARAMETERS" and "EC
        # PRIVATE KEY". The first is redundant.
        if isinstance(string, str):
            string = string.encode()
        privkey_pem = string[string.index(b'-----BEGIN EC PRIVATE KEY-----'):]
        return klass.from_der(der.unpem(privkey_pem), hashfunc)
    @classmethod
    def from_der(klass, string, hashfunc=sha1):
        """Deserialize from DER:
        SEQ([int(1), octetstring(privkey), cont[0](oid), cont[1](bitstring)])."""
        s, empty = der.remove_sequence(string)
        if empty != b'':
            raise der.UnexpectedDER("trailing junk after DER privkey: %s" %
                                    binascii.hexlify(empty))
        one, s = der.remove_integer(s)
        if one != 1:
            raise der.UnexpectedDER("expected '1' at start of DER privkey,"
                                    " got %d" % one)
        privkey_str, s = der.remove_octet_string(s)
        tag, curve_oid_str, s = der.remove_constructed(s)
        if tag != 0:
            raise der.UnexpectedDER("expected tag 0 in DER privkey,"
                                    " got %d" % tag)
        curve_oid, empty = der.remove_object(curve_oid_str)
        if empty != b'':
            raise der.UnexpectedDER("trailing junk after DER privkey "
                                    "curve_oid: %s" % binascii.hexlify(empty))
        curve = find_curve(curve_oid)
        # we don't actually care about the following fields (the embedded
        # public key can always be recomputed from the private part)
        #
        # tag, pubkey_bitstring, s = der.remove_constructed(s)
        # if tag != 1:
        #     raise der.UnexpectedDER("expected tag 1 in DER privkey, got %d"
        #                             % tag)
        # pubkey_str = der.remove_bitstring(pubkey_bitstring)
        # if empty != "":
        #     raise der.UnexpectedDER("trailing junk after DER privkey "
        #                             "pubkeystr: %s" % binascii.hexlify(empty))
        # our from_string method likes fixed-length privkey strings
        if len(privkey_str) < curve.baselen:
            privkey_str = b'\x00' * (curve.baselen - len(privkey_str)) + privkey_str
        return klass.from_string(privkey_str, curve, hashfunc)
    def to_string(self):
        """Serialize the secret exponent as a fixed-length big-endian string."""
        secexp = self.privkey.secret_multiplier
        s = number_to_string(secexp, self.privkey.order)
        return s
    def to_pem(self):
        """Serialize to a PEM block with the "EC PRIVATE KEY" label."""
        # TODO: "BEGIN ECPARAMETERS"
        return der.topem(self.to_der(), "EC PRIVATE KEY")
    def to_der(self):
        # SEQ([int(1), octetstring(privkey),cont[0], oid(secp224r1),
        # cont[1],bitstring])
        encoded_vk = b'\x00\x04' + self.get_verifying_key().to_string()
        return der.encode_sequence(der.encode_integer(1),
                                   der.encode_octet_string(self.to_string()),
                                   der.encode_constructed(0, self.curve.encoded_oid),
                                   der.encode_constructed(1, der.encode_bitstring(encoded_vk)),
                                   )
    def get_verifying_key(self):
        """Return the matching VerifyingKey (the public half of this key)."""
        return self.verifying_key
    def sign_deterministic(self, data, hashfunc=None,
                           sigencode=sigencode_string,
                           extra_entropy=b''):
        """Sign *data* using an RFC 6979 deterministic nonce (data is
        hashed with *hashfunc* first)."""
        hashfunc = hashfunc or self.default_hashfunc
        digest = hashfunc(data).digest()
        return self.sign_digest_deterministic(
            digest, hashfunc=hashfunc, sigencode=sigencode,
            extra_entropy=extra_entropy)
    def sign_digest_deterministic(self, digest, hashfunc=None,
                                  sigencode=sigencode_string,
                                  extra_entropy=b''):
        """
        Calculates 'k' from data itself, removing the need for strong
        random generator and producing deterministic (reproducible) signatures.
        See RFC 6979 for more details.
        """
        secexp = self.privkey.secret_multiplier
        def simple_r_s(r, s, order, v):
            # Pass-through encoder that keeps the recovery param 'v'.
            return r, s, order, v
        retry_gen = 0
        while True:
            # Each retry asks generate_k to skip one more good candidate,
            # so an r == 0 or s == 0 failure cannot repeat forever.
            k = rfc6979.generate_k(
                self.curve.generator.order(), secexp, hashfunc, digest,
                retry_gen=retry_gen, extra_entropy=extra_entropy)
            try:
                r, s, order, v = self.sign_digest(digest, sigencode=simple_r_s, k=k)
                break
            except RSZeroError:
                retry_gen += 1
        return sigencode(r, s, order, v)
    def sign(self, data, entropy=None, hashfunc=None, sigencode=sigencode_string, k=None):
        """
        hashfunc= should behave like hashlib.sha1 . The output length of the
        hash (in bytes) must not be longer than the length of the curve order
        (rounded up to the nearest byte), so using SHA256 with nist256p is
        ok, but SHA256 with nist192p is not. (In the 2**-96ish unlikely event
        of a hash output larger than the curve order, the hash will
        effectively be wrapped mod n).
        Use hashfunc=hashlib.sha1 to match openssl's -ecdsa-with-SHA1 mode,
        or hashfunc=hashlib.sha256 for openssl-1.0.0's -ecdsa-with-SHA256.
        """
        hashfunc = hashfunc or self.default_hashfunc
        h = hashfunc(data).digest()
        return self.sign_digest(h, entropy, sigencode, k)
    def sign_digest(self, digest, entropy=None, sigencode=sigencode_string, k=None):
        """Sign a precomputed *digest*; raises BadDigestError if it is
        longer than the curve order allows."""
        if len(digest) > self.curve.baselen:
            raise BadDigestError("this curve (%s) is too short "
                                 "for your digest (%d)" % (self.curve.name,
                                                           8 * len(digest)))
        number = string_to_number(digest)
        r, s, v = self.sign_number(number, entropy, k)
        return sigencode(r, s, self.privkey.order, v)
    def sign_number(self, number, entropy=None, k=None):
        # returns the triple (r, s, recovery_param)
        order = self.privkey.order
        # privkey.sign() may raise RuntimeError in the amazingly unlikely
        # (2**-192) event that r=0 or s=0, because that would leak the key.
        # We could re-try with a different 'k', but we couldn't test that
        # code, so I choose to allow the signature to fail instead.
        # If k is set, it is used directly. In other cases
        # it is generated using entropy function
        if k is not None:
            _k = k
        else:
            _k = randrange(order, entropy)
        assert 1 <= _k < order
        sig = self.privkey.sign(number, _k)
        return sig.r, sig.s, sig.recovery_param

View File

@@ -0,0 +1,531 @@
#! /usr/bin/env python
#
# Provide some simple capabilities from number theory.
#
# Version of 2008.11.14.
#
# Written in 2005 and 2006 by Peter Pearson and placed in the public domain.
# Revision history:
# 2008.11.14: Use pow(base, exponent, modulus) for modular_exp.
# Make gcd and lcm accept arbitrarly many arguments.
from __future__ import division
from functools import reduce
import math
class Error(Exception):
    """Root of the exception hierarchy for this number-theory module."""
class SquareRootError(Error):
    """Raised when a value has no square root modulo the given prime."""
class NegativeExponentError(Error):
    """Raised when a negative exponent is passed to modular_exp."""
def modular_exp(base, exponent, modulus):
    """Return (base ** exponent) % modulus, rejecting negative exponents."""
    if exponent < 0:
        raise NegativeExponentError("Negative exponents (%d) not allowed"
                                    % exponent)
    # Delegate to the builtin three-argument pow, which is the fast path.
    return pow(base, exponent, modulus)
def polynomial_reduce_mod(poly, polymod, p):
    """Reduce *poly* modulo the monic polynomial *polymod*, coefficients mod p.

    Polynomials are lists of coefficients in increasing powers of x.
    """
    # Only monic moduli of degree >= 1 are supported.
    assert polymod[-1] == 1
    assert len(polymod) > 1
    while len(poly) >= len(polymod):
        if poly[-1] != 0:
            # Cancel the leading term by subtracting the appropriately
            # shifted multiple of the modulus.
            for k in range(2, len(polymod) + 1):
                poly[-k] = (poly[-k] - poly[-1] * polymod[-k]) % p
        poly = poly[:-1]
    return poly
def polynomial_multiply_mod(m1, m2, polymod, p):
    """Multiply two polynomials modulo *polymod*, coefficients mod p.

    Polynomials are lists of coefficients in increasing powers of x.
    """
    # Start from the all-zero product of the maximal possible degree.
    product = [0] * (len(m1) + len(m2) - 1)
    # Accumulate every cross-term, reducing each coefficient mod p.
    for i, c1 in enumerate(m1):
        for j, c2 in enumerate(m2):
            product[i + j] = (product[i + j] + c1 * c2) % p
    return polynomial_reduce_mod(product, polymod, p)
def polynomial_exp_mod(base, exponent, polymod, p):
    """Raise polynomial *base* to *exponent* modulo *polymod* over ints mod p.

    Right-to-left square-and-multiply, per Handbook of Applied Cryptography
    algorithm 2.227. Polynomials are coefficient lists, lowest power first.
    """
    assert exponent < p
    if exponent == 0:
        return [1]
    G = base
    k = exponent
    # Seed the accumulator with G when the lowest exponent bit is set.
    s = G if k % 2 == 1 else [1]
    while k > 1:
        k = k // 2
        G = polynomial_multiply_mod(G, G, polymod, p)
        if k % 2 == 1:
            s = polynomial_multiply_mod(G, s, polymod, p)
    return s
def jacobi(a, n):
    """Compute the Jacobi symbol (a/n) for odd n >= 3 (HAC algorithm 2.149)."""
    assert n >= 3
    assert n % 2 == 1
    a = a % n
    if a == 0:
        return 0
    if a == 1:
        return 1
    # Split a = a1 * 2**e with a1 odd.
    a1, e = a, 0
    while a1 % 2 == 0:
        a1 = a1 // 2
        e = e + 1
    # Sign contribution of the factors of two.
    if e % 2 == 0 or n % 8 == 1 or n % 8 == 7:
        sign = 1
    else:
        sign = -1
    if a1 == 1:
        return sign
    # Quadratic reciprocity flips the sign when both arguments are 3 mod 4.
    if n % 4 == 3 and a1 % 4 == 3:
        sign = -sign
    return sign * jacobi(n % a1, a1)
def square_root_mod_prime(a, p):
    """Modular square root of a, mod p, p prime.

    Raises SquareRootError if a has no square root modulo p.
    """
    # Based on the Handbook of Applied Cryptography, algorithms 3.34 to 3.39.
    # This module has been tested for all values in [0,p-1] for
    # every prime p from 3 to 1229.
    assert 0 <= a < p
    assert 1 < p
    if a == 0:
        return 0
    if p == 2:
        return a
    jac = jacobi(a, p)
    if jac == -1:
        raise SquareRootError("%d has no square root modulo %d" \
                              % (a, p))
    # p = 3 (mod 4): a**((p+1)/4) is a root (HAC 3.36).
    if p % 4 == 3:
        return modular_exp(a, (p + 1) // 4, p)
    # p = 5 (mod 8): HAC 3.37.
    if p % 8 == 5:
        d = modular_exp(a, (p - 1) // 4, p)
        if d == 1:
            return modular_exp(a, (p + 3) // 8, p)
        if d == p - 1:
            return (2 * a * modular_exp(4 * a, (p - 5) // 8, p)) % p
        raise RuntimeError("Shouldn't get here.")
    # General case (p = 1 mod 8): find b so that b^2 - 4a is a non-residue,
    # then take the root via polynomial exponentiation in GF(p^2).
    for b in range(2, p):
        if jacobi(b * b - 4 * a, p) == -1:
            f = (a, -b, 1)
            ff = polynomial_exp_mod((0, 1), (p + 1) // 2, f, p)
            assert ff[1] == 0
            return ff[0]
    raise RuntimeError("No b found.")
def inverse_mod(a, m):
    """Return the multiplicative inverse of a modulo m (a, m coprime)."""
    # Normalize a into [0, m).
    if a < 0 or m <= a:
        a = a % m
    # Extended Euclid (Ferguson & Schneier): track Bezout coefficients.
    r0, r1 = a, m
    s0, t0, s1, t1 = 1, 0, 0, 1
    while r0 != 0:
        q, r0, r1 = divmod(r1, r0) + (r0,)
        s0, t0, s1, t1 = s1 - q * s0, t1 - q * t0, s0, t0
    # r1 is now gcd(a, m); an inverse exists only if it is 1, and then
    # s1 * a + t1 * m == 1, so s1 is the inverse.
    assert r1 == 1
    return s1 if s1 > 0 else s1 + m
def gcd2(a, b):
    """Return the greatest common divisor of a and b (Euclid's algorithm)."""
    while a != 0:
        a, b = b % a, a
    return b
def gcd(*a):
    """Greatest common divisor of several integers.

    Accepts either multiple arguments or a single iterable:
    gcd(2, 4, 6) == gcd([2, 4, 6]) == 2
    """
    if len(a) > 1:
        return reduce(gcd2, a)
    if hasattr(a[0], "__iter__"):
        # Single iterable argument: fold gcd2 across its elements.
        return reduce(gcd2, a[0])
    return a[0]
def lcm2(a, b):
    """Least common multiple of two integers."""
    product = a * b
    return product // gcd(a, b)
def lcm(*a):
    """Least common multiple of several integers.

    Accepts either multiple arguments or a single iterable:
    lcm(3, 4, 5) == lcm([3, 4, 5]) == 60
    """
    if len(a) > 1:
        return reduce(lcm2, a)
    if hasattr(a[0], "__iter__"):
        # Single iterable argument: fold lcm2 across its elements.
        return reduce(lcm2, a[0])
    return a[0]
def factorization(n):
    """Decompose n into a list of (prime, exponent) pairs."""
    assert isinstance(n, int)
    if n < 2:
        return []
    result = []
    d = 2
    # Test the small primes:
    for d in smallprimes:
        if d > n:
            break
        q, r = divmod(n, d)
        if r == 0:
            # d divides n: strip out every factor of d, counting them.
            count = 1
            while d <= n:
                n = q
                q, r = divmod(n, d)
                if r != 0:
                    break
                count = count + 1
            result.append((d, count))
    # If n is still greater than the last of our small primes,
    # it may require further work:
    if n > smallprimes[-1]:
        if is_prime(n):  # If what's left is prime, it's easy:
            result.append((n, 1))
        else:  # Ugh. Search stupidly for a divisor:
            d = smallprimes[-1]
            while 1:
                d = d + 2  # Try the next divisor.
                q, r = divmod(n, d)
                if q < d:  # n < d*d means we're done, n = 1 or prime.
                    break
                if r == 0:  # d divides n. How many times?
                    count = 1
                    n = q
                    while d <= n:  # As long as d might still divide n,
                        q, r = divmod(n, d)  # see if it does.
                        if r != 0:
                            break
                        n = q  # It does. Reduce n, increase count.
                        count = count + 1
                    result.append((d, count))
            # Whatever survives the trial division is a prime factor.
            if n > 1:
                result.append((n, 1))
    return result
def phi(n):
    """Return the Euler totient function of n."""
    assert isinstance(n, int)
    if n < 3:
        return 1
    result = 1
    # Each prime power p**e contributes p**(e-1) * (p - 1) to the totient.
    for prime, exponent in factorization(n):
        if exponent > 1:
            result = result * prime ** (exponent - 1) * (prime - 1)
        else:
            result = result * (prime - 1)
    return result
def carmichael(n):
    """Return the Carmichael function of n.

    Carmichael(n) is the smallest integer x such that m**x == 1 (mod n)
    for every m relatively prime to n.
    """
    return carmichael_of_factorized(factorization(n))
def carmichael_of_factorized(f_list):
    """Carmichael function of a number given as (prime, exponent) pairs."""
    if not f_list:
        return 1
    # The Carmichael function of a product of coprime prime powers is the
    # lcm of the Carmichael functions of the individual prime powers.
    result = carmichael_of_ppower(f_list[0])
    for pp in f_list[1:]:
        result = lcm(result, carmichael_of_ppower(pp))
    return result
def carmichael_of_ppower(pp):
    """Carmichael function of p**a, for the (prime, exponent) pair pp."""
    p, a = pp
    # Powers of two beyond 4 are the special case: lambda(2**a) = 2**(a-2).
    if p == 2 and a > 2:
        return 2 ** (a - 2)
    return (p - 1) * p ** (a - 1)
def order_mod(x, m):
    """Return the order of x in the multiplicative group mod m.

    Naive repeated multiplication; slow when m is very large.
    """
    if m <= 1:
        return 0
    assert gcd(x, m) == 1
    power = x
    exponent = 1
    while power != 1:
        power = (power * x) % m
        exponent = exponent + 1
    return exponent
def largest_factor_relatively_prime(a, b):
    """Return the largest factor of a that is relatively prime to b."""
    while True:
        d = gcd(a, b)
        if d <= 1:
            break
        b = d
        # Strip every factor of d out of a.
        while True:
            q, r = divmod(a, d)
            if r > 0:
                break
            a = q
    return a
def kinda_order_mod(x, m):
    """Return the order of x in the multiplicative group mod m', where m'
    is the largest factor of m relatively prime to x."""
    return order_mod(x, largest_factor_relatively_prime(m, x))
def is_prime(n):
    """Return True if n is prime, False otherwise.
    We use the Miller-Rabin test, as given in Menezes et al. p. 138.
    This test is not exact: there are composite values n for which
    it returns True.
    In testing the odd numbers from 10000001 to 19999999,
    about 66 composites got past the first test,
    5 got past the second test, and none got past the third.
    Since factors of 2, 3, 5, 7, and 11 were detected during
    preliminary screening, the number of numbers tested by
    Miller-Rabin was (19999999 - 10000001)*(2/3)*(4/5)*(6/7)
    = 4.57 million.
    """
    # (This is used to study the risk of false positives:)
    global miller_rabin_test_count
    miller_rabin_test_count = 0
    # Small inputs are answered directly from the table of small primes.
    if n <= smallprimes[-1]:
        if n in smallprimes:
            return True
        else:
            return False
    # Quick rejection of multiples of the first five primes.
    if gcd(n, 2 * 3 * 5 * 7 * 11) != 1:
        return False
    # Choose a number of iterations sufficient to reduce the
    # probability of accepting a composite below 2**-80
    # (from Menezes et al. Table 4.4):
    t = 40
    n_bits = 1 + int(math.log(n, 2))
    for k, tt in ((100, 27),
                  (150, 18),
                  (200, 15),
                  (250, 12),
                  (300, 9),
                  (350, 8),
                  (400, 7),
                  (450, 6),
                  (550, 5),
                  (650, 4),
                  (850, 3),
                  (1300, 2),
                  ):
        if n_bits < k:
            break
        t = tt
    # Run the test t times:
    # Write n - 1 as 2**s * r with r odd.
    s = 0
    r = n - 1
    while (r % 2) == 0:
        s = s + 1
        r = r // 2
    for i in range(t):
        # Use the i-th small prime as the Miller-Rabin base.
        a = smallprimes[i]
        y = modular_exp(a, r, n)
        if y != 1 and y != n - 1:
            j = 1
            while j <= s - 1 and y != n - 1:
                y = modular_exp(y, 2, n)
                if y == 1:
                    # Found a nontrivial square root of 1: n is composite.
                    miller_rabin_test_count = i + 1
                    return False
                j = j + 1
            if y != n - 1:
                miller_rabin_test_count = i + 1
                return False
    return True
def next_prime(starting_value):
    """Return the smallest prime strictly larger than starting_value."""
    if starting_value < 2:
        return 2
    # Start at the first odd number above starting_value, then step by 2.
    candidate = (starting_value + 1) | 1
    while not is_prime(candidate):
        candidate = candidate + 2
    return candidate
# Primes up to 1229, used for trial division in factorization() and as the
# Miller-Rabin bases in is_prime().
smallprimes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
               43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
               101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
               151, 157, 163, 167, 173, 179, 181, 191, 193, 197,
               199, 211, 223, 227, 229, 233, 239, 241, 251, 257,
               263, 269, 271, 277, 281, 283, 293, 307, 311, 313,
               317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
               383, 389, 397, 401, 409, 419, 421, 431, 433, 439,
               443, 449, 457, 461, 463, 467, 479, 487, 491, 499,
               503, 509, 521, 523, 541, 547, 557, 563, 569, 571,
               577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
               641, 643, 647, 653, 659, 661, 673, 677, 683, 691,
               701, 709, 719, 727, 733, 739, 743, 751, 757, 761,
               769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
               839, 853, 857, 859, 863, 877, 881, 883, 887, 907,
               911, 919, 929, 937, 941, 947, 953, 967, 971, 977,
               983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033,
               1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093,
               1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163,
               1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229]
# Diagnostic counter: is_prime() sets this to the number of Miller-Rabin
# rounds executed when it detects a composite (used to study the risk of
# false positives).
miller_rabin_test_count = 0

View File

@@ -0,0 +1,100 @@
'''
RFC 6979:
Deterministic Usage of the Digital Signature Algorithm (DSA) and
Elliptic Curve Digital Signature Algorithm (ECDSA)
http://tools.ietf.org/html/rfc6979
Many thanks to Coda Hale for his implementation in Go language:
https://github.com/codahale/rfc6979
'''
import hmac
from binascii import hexlify
from .util import number_to_string, number_to_string_crop
def bit_length(num):
    """Return the number of bits needed to represent abs(num); 0 for 0."""
    # Equivalent to the bin()/lstrip trick the module used historically,
    # but delegates to the builtin directly.
    return num.bit_length()
def bits2int(data, qlen):
    """Convert byte string *data* to an int, keeping only its qlen leftmost bits."""
    x = int(hexlify(data), 16)
    nbits = len(data) * 8
    # Discard the surplus low-order bits when the input is wider than qlen.
    if nbits > qlen:
        x = x >> (nbits - qlen)
    return x
def bits2octets(data, order):
    """Convert digest *data* to octets per RFC 6979 (the bits2octets transform)."""
    z1 = bits2int(data, bit_length(order))
    # A single conditional subtraction brings the value below the order.
    reduced = z1 if z1 < order else z1 - order
    return number_to_string_crop(reduced, order)
# https://tools.ietf.org/html/rfc6979#section-3.2
def generate_k(order, secexp, hash_func, data, retry_gen=0, extra_entropy=b''):
    '''
    Generate the deterministic ECDSA nonce 'k' per RFC 6979, section 3.2.

    order - order of the DSA generator used in the signature
    secexp - secure exponent (private key) in numeric form
    hash_func - reference to the same hash function used for generating hash
    data - hash in binary form of the signing data
    retry_gen - int - how many good 'k' values to skip before returning
    extra_entropy - extra added data in binary form as per section-3.6 of
                    rfc6979
    '''
    qlen = bit_length(order)
    holen = hash_func().digest_size
    # Fixed: use integer (floor) division; '/' yields a float on Python 3,
    # leaving rolen a float byte count instead of an int.
    rolen = (qlen + 7) // 8
    bx = number_to_string(secexp, order) + bits2octets(data, order) + \
        extra_entropy
    # Step B
    v = b'\x01' * holen
    # Step C
    k = b'\x00' * holen
    # Step D
    k = hmac.new(k, v + b'\x00' + bx, hash_func).digest()
    # Step E
    v = hmac.new(k, v, hash_func).digest()
    # Step F
    k = hmac.new(k, v + b'\x01' + bx, hash_func).digest()
    # Step G
    v = hmac.new(k, v, hash_func).digest()
    # Step H: squeeze candidates out of the HMAC-DRBG until one lands
    # in [1, order).
    while True:
        # Step H1
        t = b''
        # Step H2: concatenate HMAC outputs until we have rolen bytes.
        while len(t) < rolen:
            v = hmac.new(k, v, hash_func).digest()
            t += v
        # Step H3
        secret = bits2int(t, qlen)
        if secret >= 1 and secret < order:
            if retry_gen <= 0:
                return secret
            # The caller asked us to skip this (otherwise good) candidate.
            retry_gen -= 1
        # Candidate rejected or skipped: re-key the DRBG and continue.
        k = hmac.new(k, v + b'\x00', hash_func).digest()
        v = hmac.new(k, v, hash_func).digest()

View File

@@ -0,0 +1,266 @@
from __future__ import division
import os
import math
import binascii
from hashlib import sha256
from . import der
from .curves import orderlen
# RFC5480:
# The "unrestricted" algorithm identifier is:
# id-ecPublicKey OBJECT IDENTIFIER ::= {
# iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 }
oid_ecPublicKey = (1, 2, 840, 10045, 2, 1)
# DER encoding of the OID above, used when serializing public keys.
encoded_oid_ecPublicKey = der.encode_oid(*oid_ecPublicKey)
def randrange(order, entropy=None):
    """Return a random integer k with 1 <= k < order, uniformly distributed.

    For simplicity this only behaves well when *order* is fairly close to
    (but below) a power of 256 -- true for all the standard curves. The
    rejection-sampling loop gets slower as *order* falls toward the previous
    power of 256 (up to avg=512 loops for the worst case (256**k)+1), and
    gives up with RuntimeError after 10k attempts so a broken entropy
    function cannot hang us forever.

    *entropy*, if given, must behave like os.urandom. It exists mainly for
    repeatable unit tests within a release; this function's output is not
    guaranteed to be stable across releases.
    """
    # Sampling bit-wise instead of byte-wise would cut the worst case to
    # ~2 expected loops (mask the top byte, combine with the remaining
    # bytes), but it would complicate the code considerably.
    if entropy is None:
        entropy = os.urandom
    assert order > 1
    byte_count = orderlen(order)
    attempts_left = 10000  # gives about 2**-60 failures for worst case
    while attempts_left > 0:
        attempts_left -= 1
        candidate = string_to_number(entropy(byte_count)) + 1
        if 1 <= candidate < order:
            return candidate
    raise RuntimeError("randrange() tried hard but gave up, either something"
                       " is very wrong or you got realllly unlucky. Order was"
                       " %x" % order)
class PRNG:
    """Deterministic pseudorandom byte stream derived from a seed.

    Calling the instance with an integer N returns the next N bytes of the
    stream, which is produced by hashing "prng-<counter>-<seed>" with
    SHA-256 block by block. This is a short-term PRNG (meant primarily for
    randrange_from_seed__trytryagain()); it offers no protection against
    state compromise (no forward security).
    """
    def __init__(self, seed):
        self.generator = self.block_generator(seed)

    def __call__(self, numbytes):
        return bytes(next(self.generator) for _ in range(numbytes))

    def block_generator(self, seed):
        counter = 0
        while True:
            digest = sha256(("prng-%d-%s" % (counter, seed)).encode()).digest()
            for byte in digest:
                yield byte
            counter += 1
def randrange_from_seed__overshoot_modulo(seed, order):
    """Map *seed* deterministically to an integer in [1, order).

    Uses David-Sarah Hopwood's suggestion: derive a value sufficiently
    larger than the group order, then reduce it modulo (order - 1). This
    gives adequate (but not perfect) uniformity with very simple code;
    try-try-again sampling is the main alternative.
    """
    oversized = PRNG(seed)(2 * orderlen(order))
    number = (int(binascii.hexlify(oversized), 16) % (order - 1)) + 1
    assert 1 <= number < order, (1, number, order)
    return number
def lsb_of_ones(numbits):
    """Return an int with the low `numbits` bits set (e.g. 3 -> 0b111)."""
    mask = 1 << numbits
    return mask - 1
def bits_and_bytes(order):
    """
    Return ``(bits, bytes, extrabits)`` describing how many bits are needed
    to represent ``order - 1``, split into whole bytes plus leftover bits.

    Fix: uses ``int.bit_length()`` instead of ``int(math.log(order - 1, 2) + 1)``.
    ``math.log`` goes through a double and its rounding can be off by one for
    cryptographic-size integers; ``bit_length`` is exact for any int and is
    mathematically equal to the old expression wherever the old one was correct.
    """
    bits = (order - 1).bit_length()
    whole_bytes, extrabits = divmod(bits, 8)
    return bits, whole_bytes, extrabits
# the following randrange_from_seed__METHOD() functions take an
# arbitrarily-sized secret seed and turn it into a number that obeys the same
# range limits as randrange() above. They are meant for deriving consistent
# signing keys from a secret rather than generating them randomly, for
# example a protocol in which three signing keys are derived from a master
# secret. You should use a uniformly-distributed unguessable seed with about
# curve.baselen bytes of entropy. To use one, do this:
# seed = os.urandom(curve.baselen) # or other starting point
#  secexp = ecdsa.util.randrange_from_seed__trytryagain(seed, curve.order)
# sk = SigningKey.from_secret_exponent(secexp, curve)
def randrange_from_seed__truncate_bytes(seed, order, hashmod=sha256):
    """
    Hash the seed, then turn the digest into a number in [1, order).

    Doesn't try to uniformly fill the range; loses about four bits of
    entropy on average.

    :param seed: bytes-like secret to hash
    :param order: group order (upper bound, exclusive)
    :param hashmod: hash constructor, defaults to sha256

    Fix: the pad prefix must be ``b"\\x00"`` — the original used the text
    string ``"\\x00"``, which raises ``TypeError`` (str + bytes) on Python 3
    whenever the order needs more bytes than the digest provides
    (e.g. 521-bit curves with a 32-byte sha256 digest).
    """
    bits, _bytes, extrabits = bits_and_bytes(order)
    if extrabits:
        _bytes += 1
    base = hashmod(seed).digest()[:_bytes]
    # Left-pad with zero BYTES (not str) when the digest is shorter than needed.
    base = b"\x00" * (_bytes - len(base)) + base
    number = 1 + int(binascii.hexlify(base), 16)
    # NOTE(review): truncation does not guarantee number < order; this assert
    # can fire for unlucky digests, matching the documented best-effort intent.
    assert 1 <= number < order
    return number
def randrange_from_seed__truncate_bits(seed, order, hashmod=sha256):
    """
    Like randrange_from_seed__truncate_bytes, but masks off the surplus top
    bits as well, losing only about half a bit of entropy on average.

    :param seed: bytes-like secret to hash
    :param order: group order (upper bound, exclusive)
    :param hashmod: hash constructor, defaults to sha256

    Fixes for Python 3:
    - ``ord(base[0])`` raised ``TypeError`` because indexing bytes already
      yields an int; use ``base[0]`` directly.
    - the pad prefix must be ``b"\\x00"``, not the text string ``"\\x00"``.
    - bit count via exact ``int.bit_length()`` instead of float ``math.log``.
    """
    bits = (order - 1).bit_length()
    maxbytes = (bits + 7) // 8
    base = hashmod(seed).digest()[:maxbytes]
    # Left-pad with zero BYTES when the digest is shorter than needed.
    base = b"\x00" * (maxbytes - len(base)) + base
    topbits = 8 * maxbytes - bits
    if topbits:
        # Clear the surplus high bits of the leading byte.
        base = int.to_bytes(base[0] & ((1 << topbits) - 1), 1, 'big') + base[1:]
    number = 1 + int(binascii.hexlify(base), 16)
    assert 1 <= number < order
    return number
def randrange_from_seed__trytryagain(seed, order):
    """
    Derive a number in [1, order) from `seed` by rejection sampling.

    Draws exactly the number of bits the order requires from a PRNG keyed
    on the seed, masking surplus high bits of the first byte, and retries
    until the candidate lands in range.  Expected loop count is between
    1.0 (order = 2**k - 1) and 2.0 (order = 2**k + 1).
    """
    assert order > 1
    bits, num_bytes, extrabits = bits_and_bytes(order)
    generate = PRNG(seed)
    while True:
        if extrabits:
            # One extra byte, masked down to just the leftover bits.
            prefix = int.to_bytes(ord(generate(1)) & lsb_of_ones(extrabits), 1, 'big')
        else:
            prefix = b''
        guess = string_to_number(prefix + generate(num_bytes)) + 1
        if 1 <= guess < order:
            return guess
def number_to_string(num, order):
    """Encode `num` as big-endian bytes of exactly orderlen(order) length."""
    length = orderlen(order)
    hex_repr = "%0*x" % (2 * length, num)
    encoded = binascii.unhexlify(hex_repr.encode())
    assert len(encoded) == length, (len(encoded), length)
    return encoded
def number_to_string_crop(num, order):
    """Like number_to_string, but silently crop to orderlen(order) bytes."""
    length = orderlen(order)
    hex_repr = "%0*x" % (2 * length, num)
    return binascii.unhexlify(hex_repr.encode())[:length]
def string_to_number(string):
    """Interpret a byte string as a big-endian unsigned integer."""
    hex_digits = binascii.hexlify(string)
    return int(hex_digits, 16)
def string_to_number_fixedlen(string, order):
    """As string_to_number, but require exactly orderlen(order) bytes."""
    expected_len = orderlen(order)
    assert len(string) == expected_len, (len(string), expected_len)
    return int(binascii.hexlify(string), 16)
# these methods are useful for the sigencode= argument to SK.sign() and the
# sigdecode= argument to VK.verify(), and control how the signature is packed
# or unpacked.
def sigencode_strings(r, s, order, v=None):
    """Pack (r, s) as a pair of fixed-length byte strings plus recovery id."""
    return (number_to_string(r, order),
            number_to_string(s, order),
            v)
def sigencode_string(r, s, order, v=None):
    """Pack (r, s) as one raw r||s byte string; sizes are fixed per curve,
    so simple concatenation suffices."""
    r_str, s_str, _ = sigencode_strings(r, s, order)
    return b"".join((r_str, s_str))
def sigencode_der(r, s, order, v=None):
    """Pack (r, s) as a DER SEQUENCE of two INTEGERs."""
    encoded_r = der.encode_integer(r)
    encoded_s = der.encode_integer(s)
    return der.encode_sequence(encoded_r, encoded_s)
# canonical versions of sigencode methods
# these enforce low S values, by negating the value (modulo the order) if above order/2
# see CECKey::Sign() https://github.com/bitcoin/bitcoin/blob/master/src/key.cpp#L214
def sigencode_strings_canonize(r, s, order, v=None):
    """
    As sigencode_strings, but enforce the canonical low-S form by negating
    s (modulo the order) when it lies in the upper half, flipping the
    recovery id accordingly.
    See CECKey::Sign() https://github.com/bitcoin/bitcoin/blob/master/src/key.cpp#L214

    Fix: use integer floor division.  ``order / 2`` produces a float; a
    256-bit order rounds when converted to double, so values of s near the
    true midpoint could be misclassified.  ``order // 2`` is exact and,
    because the order is odd, ``s > order // 2`` is the exact low-S test.
    """
    if s > order // 2:
        s = order - s
        if v is not None:
            v ^= 1
    return sigencode_strings(r, s, order, v)
def sigencode_string_canonize(r, s, order, v=None):
    """
    As sigencode_string, but enforce the canonical low-S form by negating
    s (modulo the order) when it lies in the upper half, flipping the
    recovery id accordingly.

    Fix: ``order // 2`` (exact integer test) instead of ``order / 2``,
    whose float rounding can misclassify s near the midpoint for
    cryptographic-size orders.
    """
    if s > order // 2:
        s = order - s
        if v is not None:
            v ^= 1
    return sigencode_string(r, s, order, v)
def sigencode_der_canonize(r, s, order, v=None):
    """
    As sigencode_der, but enforce the canonical low-S form by negating
    s (modulo the order) when it lies in the upper half, flipping the
    recovery id accordingly.

    Fix: ``order // 2`` (exact integer test) instead of ``order / 2``,
    whose float rounding can misclassify s near the midpoint for
    cryptographic-size orders.
    """
    if s > order // 2:
        s = order - s
        if v is not None:
            v ^= 1
    return sigencode_der(r, s, order, v)
def sigdecode_string(signature, order):
    """Split a raw r||s signature into its two integers."""
    half = orderlen(order)
    assert len(signature) == 2 * half, (len(signature), 2 * half)
    r_part, s_part = signature[:half], signature[half:]
    return (string_to_number_fixedlen(r_part, order),
            string_to_number_fixedlen(s_part, order))
def sigdecode_strings(rs_strings, order):
    """Decode a (r_bytes, s_bytes) pair into the two signature integers."""
    r_str, s_str = rs_strings
    expected = orderlen(order)
    assert len(r_str) == expected, (len(r_str), expected)
    assert len(s_str) == expected, (len(s_str), expected)
    return (string_to_number_fixedlen(r_str, order),
            string_to_number_fixedlen(s_str, order))
def sigdecode_der(sig_der, order):
    """Decode a DER SEQUENCE-of-two-INTEGERs signature into (r, s),
    rejecting any trailing bytes after either the sequence or the numbers."""
    rs_strings, trailing = der.remove_sequence(sig_der)
    if trailing != b'':
        raise der.UnexpectedDER("trailing junk after DER sig: %s" %
                                binascii.hexlify(trailing))
    r, remainder = der.remove_integer(rs_strings)
    s, trailing = der.remove_integer(remainder)
    if trailing != b'':
        raise der.UnexpectedDER("trailing junk after DER numbers: %s" %
                                binascii.hexlify(trailing))
    return r, s

View File

@@ -0,0 +1,7 @@
from .abi import *
from .account import *
from .typing import *
from .utils import *
from .hexbytes import *
# Submodule names re-exported as the package's public surface; the
# star-imports above additionally pull each submodule's public names in.
__all__ = [ 'account', 'typing', 'utils', 'abi', 'hexbytes' ]

View File

@@ -0,0 +1,16 @@
from .abi import (
decode,
decode_abi,
decode_single,
encode,
encode_abi,
encode_single,
is_encodable,
is_encodable_type,
)
# This code from: https://github.com/ethereum/eth-abi/tree/v3.0.1
# Vendored copy; the upstream version string is replaced with a ccxt marker.
__version__ = 'ccxt'
# NOTE(review): only decode/encode are declared public although more names
# are imported above — presumably the intended vendored subset; verify.
__all__ = ['decode','encode']

View File

@@ -0,0 +1,19 @@
from .codec import (
ABICodec,
)
from .registry import (
registry,
)
# Module-level codec built over the default type registry; the bound methods
# below are re-exported as this package's functional encode/decode API.
default_codec = ABICodec(registry)
encode = default_codec.encode
encode_abi = default_codec.encode_abi # deprecated
encode_single = default_codec.encode_single # deprecated
decode = default_codec.decode
decode_abi = default_codec.decode_abi # deprecated
decode_single = default_codec.decode_single # deprecated
is_encodable = default_codec.is_encodable
is_encodable_type = default_codec.is_encodable_type

View File

@@ -0,0 +1,152 @@
import functools
from ..typing.abi import (
TypeStr,
)
from .grammar import (
BasicType,
TupleType,
normalize,
parse,
)
def parse_type_str(expected_base=None, with_arrlist=False):
    """
    Used by BaseCoder subclasses as a convenience for implementing the
    ``from_type_str`` method required by ``ABIRegistry``. Useful if normalizing
    then parsing a type string with an (optional) expected base is required in
    that method.

    :param expected_base: if given, the parsed type must be a basic type with
        this base (e.g. ``'uint'``); otherwise ``ValueError`` is raised.
    :param with_arrlist: whether the parsed type is required to carry an
        array dimension list (``True``) or required not to (``False``).
    """
    def decorator(old_from_type_str):
        @functools.wraps(old_from_type_str)
        def new_from_type_str(cls, type_str, registry):
            # Normalize first so aliases parse to their canonical form.
            normalized_type_str = normalize(type_str)
            abi_type = parse(normalized_type_str)
            type_str_repr = repr(type_str)
            if type_str != normalized_type_str:
                # Surface both spellings in error messages when normalization
                # changed the input.
                type_str_repr = "{} (normalized to {})".format(
                    type_str_repr,
                    repr(normalized_type_str),
                )
            if expected_base is not None:
                if not isinstance(abi_type, BasicType):
                    raise ValueError(
                        "Cannot create {} for non-basic type {}".format(
                            cls.__name__,
                            type_str_repr,
                        )
                    )
                if abi_type.base != expected_base:
                    raise ValueError(
                        "Cannot create {} for type {}: expected type with "
                        "base '{}'".format(
                            cls.__name__,
                            type_str_repr,
                            expected_base,
                        )
                    )
            if not with_arrlist and abi_type.arrlist is not None:
                raise ValueError(
                    "Cannot create {} for type {}: expected type with "
                    "no array dimension list".format(
                        cls.__name__,
                        type_str_repr,
                    )
                )
            if with_arrlist and abi_type.arrlist is None:
                raise ValueError(
                    "Cannot create {} for type {}: expected type with "
                    "array dimension list".format(
                        cls.__name__,
                        type_str_repr,
                    )
                )
            # Perform general validation of default solidity types
            abi_type.validate()
            # Hand the decorated method the parsed node rather than the raw string.
            return old_from_type_str(cls, abi_type, registry)
        return classmethod(new_from_type_str)
    return decorator
def parse_tuple_type_str(old_from_type_str):
    """
    Used by BaseCoder subclasses as a convenience for implementing the
    ``from_type_str`` method required by ``ABIRegistry``. Useful if normalizing
    then parsing a tuple type string is required in that method.

    Raises ``ValueError`` if the (normalized) type string does not parse to a
    tuple type.
    """
    @functools.wraps(old_from_type_str)
    def new_from_type_str(cls, type_str, registry):
        # Normalize first so aliases parse to their canonical form.
        normalized_type_str = normalize(type_str)
        abi_type = parse(normalized_type_str)
        type_str_repr = repr(type_str)
        if type_str != normalized_type_str:
            # Surface both spellings in error messages when normalization
            # changed the input.
            type_str_repr = "{} (normalized to {})".format(
                type_str_repr,
                repr(normalized_type_str),
            )
        if not isinstance(abi_type, TupleType):
            raise ValueError(
                "Cannot create {} for non-tuple type {}".format(
                    cls.__name__,
                    type_str_repr,
                )
            )
        abi_type.validate()
        # Hand the decorated method the parsed node rather than the raw string.
        return old_from_type_str(cls, abi_type, registry)
    return classmethod(new_from_type_str)
class BaseCoder:
    """
    Base class for all encoder and decoder classes.
    """
    # Subclasses set this True when their encoded form is variable-length.
    is_dynamic = False

    def __init__(self, **kwargs):
        """Accept only keyword args naming existing class attributes, then validate."""
        cls = type(self)
        # Ensure no unrecognized kwargs were given
        for key, value in kwargs.items():
            if not hasattr(cls, key):
                raise AttributeError(
                    "Property {key} not found on {cls_name} class. "
                    "`{cls_name}.__init__` only accepts keyword arguments which are "
                    "present on the {cls_name} class.".format(
                        key=key,
                        cls_name=cls.__name__,
                    )
                )
            setattr(self, key, value)
        # Validate given combination of kwargs
        self.validate()

    def validate(self):
        # Hook for subclasses; the base implementation accepts any configuration.
        pass

    @classmethod
    def from_type_str(
        cls, type_str: TypeStr, registry
    ) -> "BaseCoder": # pragma: no cover
        """
        Used by :any:`ABIRegistry` to get an appropriate encoder or decoder
        instance for the given type string and type registry.
        """
        raise NotImplementedError("Must implement `from_type_str`")

View File

@@ -0,0 +1,217 @@
from typing import (
Any,
Iterable,
Tuple,
)
import warnings
from ..typing.abi import (
Decodable,
TypeStr,
)
from ..utils import (
is_bytes,
)
from .decoding import (
ContextFramesBytesIO,
TupleDecoder,
)
from .encoding import (
TupleEncoder,
)
from .exceptions import (
EncodingError,
)
from .registry import (
ABIRegistry,
)
class BaseABICoder:
    """
    Base class for porcelain coding APIs. These are classes which wrap
    instances of :class:`~.registry.ABIRegistry` to provide last-mile
    coding functionality.
    """
    def __init__(self, registry: ABIRegistry):
        """
        Constructor.

        :param registry: The registry providing the encoders to be used when
            encoding values.
        """
        # Shared by the ABIEncoder/ABIDecoder mixins for coder lookups.
        self._registry = registry
class ABIEncoder(BaseABICoder):
    """
    Wraps a registry to provide last-mile encoding functionality.
    """
    def encode_single(self, typ: TypeStr, arg: Any) -> bytes:
        """
        Encodes the python value ``arg`` as a binary value of the ABI type
        ``typ``.

        :param typ: The string representation of the ABI type that will be used
            for encoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``,
            etc.
        :param arg: The python value to be encoded.
        :returns: The binary representation of the python value ``arg`` as a
            value of the ABI type ``typ``.
        """
        # Deprecated alias kept for callers of the old eth-abi API.
        warnings.warn(
            "abi.encode_single() and abi.encode_single_packed() are deprecated "
            "and will be removed in version 4.0.0 in favor of abi.encode() and "
            "abi.encode_packed(), respectively",
            category=DeprecationWarning,
        )
        encoder = self._registry.get_encoder(typ)
        return encoder(arg)

    def encode_abi(self, types: Iterable[TypeStr], args: Iterable[Any]) -> bytes:
        """
        Encodes the python values in ``args`` as a sequence of binary values of
        the ABI types in ``types`` via the head-tail mechanism.

        :param types: An iterable of string representations of the ABI types
            that will be used for encoding e.g. ``('uint256', 'bytes[]',
            '(int,int)')``
        :param args: An iterable of python values to be encoded.
        :returns: The head-tail encoded binary representation of the python
            values in ``args`` as values of the ABI types in ``types``.
        """
        # Deprecated alias; forwards to encode() after warning.
        warnings.warn(
            "abi.encode_abi() and abi.encode_abi_packed() are deprecated and will be "
            "removed in version 4.0.0 in favor of abi.encode() and "
            "abi.encode_packed(), respectively",
            category=DeprecationWarning,
        )
        return self.encode(types, args)

    def encode(self, types, args):
        """Encode ``args`` as values of the ABI ``types`` via a TupleEncoder."""
        encoders = [self._registry.get_encoder(type_str) for type_str in types]
        encoder = TupleEncoder(encoders=encoders)
        return encoder(args)

    def is_encodable(self, typ: TypeStr, arg: Any) -> bool:
        """
        Determines if the python value ``arg`` is encodable as a value of the
        ABI type ``typ``.

        :param typ: A string representation for the ABI type against which the
            python value ``arg`` will be checked e.g. ``'uint256'``,
            ``'bytes[]'``, ``'(int,int)'``, etc.
        :param arg: The python value whose encodability should be checked.
        :returns: ``True`` if ``arg`` is encodable as a value of the ABI type
            ``typ``. Otherwise, ``False``.
        """
        encoder = self._registry.get_encoder(typ)
        try:
            encoder.validate_value(arg)
        except EncodingError:
            return False
        except AttributeError:
            # Encoder has no validate_value (e.g. a plain callable); fall back
            # to attempting a full encode.
            try:
                encoder(arg)
            except EncodingError:
                return False
        return True

    def is_encodable_type(self, typ: TypeStr) -> bool:
        """
        Returns ``True`` if values for the ABI type ``typ`` can be encoded by
        this codec.

        :param typ: A string representation for the ABI type that will be
            checked for encodability e.g. ``'uint256'``, ``'bytes[]'``,
            ``'(int,int)'``, etc.
        :returns: ``True`` if values for ``typ`` can be encoded by this codec.
            Otherwise, ``False``.
        """
        return self._registry.has_encoder(typ)
class ABIDecoder(BaseABICoder):
    """
    Wraps a registry to provide last-mile decoding functionality.
    """
    # Stream type used while decoding; tracks nested contextual frames so
    # offsets in head sections resolve correctly (see ContextFramesBytesIO).
    stream_class = ContextFramesBytesIO

    def decode_single(self, typ: TypeStr, data: Decodable) -> Any:
        """
        Decodes the binary value ``data`` of the ABI type ``typ`` into its
        equivalent python value.

        :param typ: The string representation of the ABI type that will be used for
            decoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``, etc.
        :param data: The binary value to be decoded.
        :returns: The equivalent python value of the ABI value represented in
            ``data``.
        """
        # Deprecated alias kept for callers of the old eth-abi API.
        warnings.warn(
            "abi.decode_single() is deprecated and will be removed in version 4.0.0 "
            "in favor of abi.decode()",
            category=DeprecationWarning,
        )
        if not is_bytes(data):
            raise TypeError(
                "The `data` value must be of bytes type. Got {0}".format(type(data))
            )
        decoder = self._registry.get_decoder(typ)
        stream = self.stream_class(data)
        return decoder(stream)

    def decode_abi(self, types: Iterable[TypeStr], data: Decodable) -> Tuple[Any, ...]:
        """
        Decodes the binary value ``data`` as a sequence of values of the ABI types
        in ``types`` via the head-tail mechanism into a tuple of equivalent python
        values.

        :param types: An iterable of string representations of the ABI types that
            will be used for decoding e.g. ``('uint256', 'bytes[]', '(int,int)')``
        :param data: The binary value to be decoded.
        :returns: A tuple of equivalent python values for the ABI values
            represented in ``data``.
        """
        # Deprecated alias; forwards to decode() after warning.
        warnings.warn(
            "abi.decode_abi() is deprecated and will be removed in version 4.0.0 in "
            "favor of abi.decode()",
            category=DeprecationWarning,
        )
        return self.decode(types, data)

    def decode(self, types, data):
        """Decode ``data`` as a tuple of values of the ABI ``types``."""
        if not is_bytes(data):
            raise TypeError(
                f"The `data` value must be of bytes type. Got {type(data)}"
            )
        decoders = [self._registry.get_decoder(type_str) for type_str in types]
        decoder = TupleDecoder(decoders=decoders)
        stream = self.stream_class(data)
        return decoder(stream)
class ABICodec(ABIEncoder, ABIDecoder):
    """Combined porcelain API: encoding and decoding over one shared registry."""
    pass

View File

@@ -0,0 +1,3 @@
# 2**256 — modulus-style bound for 256-bit arithmetic.
TT256 = 2**256
# 2**256 - 1 — mask of 256 one-bits.
TT256M1 = 2**256 - 1
# 2**255 — sign threshold when reading a 256-bit word as two's complement.
TT255 = 2**255

View File

@@ -0,0 +1,565 @@
import abc
import decimal
import io
from typing import (
Any,
)
from ..utils import (
big_endian_to_int,
to_normalized_address,
to_tuple,
)
from .base import (
BaseCoder,
parse_tuple_type_str,
parse_type_str,
)
from .exceptions import (
DecodingError,
InsufficientDataBytes,
NonEmptyPaddingBytes,
)
from .utils.numeric import (
TEN,
abi_decimal_context,
ceil32,
)
class ContextFramesBytesIO(io.BytesIO):
    """
    A byte stream which can track a series of contextual frames in a stack. This
    data structure is necessary to perform nested decodings using the
    :py:class:``HeadTailDecoder`` since offsets present in head sections are
    relative only to a particular encoded object. These offsets can only be
    used to locate a position in a decoding stream if they are paired with a
    contextual offset that establishes the position of the object in which they
    are found.

    For example, consider the encoding of a value for the following type::

        type: (int,(int,int[]))
        value: (1,(2,[3,3]))

    There are two tuples in this type: one inner and one outer. The inner tuple
    type contains a dynamic type ``int[]`` and, therefore, is itself dynamic.
    This means that its value encoding will be placed in the tail section of the
    outer tuple's encoding. Furthermore, the inner tuple's encoding will,
    itself, contain a tail section with the encoding for ``[3,3]``. All
    together, the encoded value of ``(1,(2,[3,3]))`` would look like this (the
    data values are normally 32 bytes wide but have been truncated to remove the
    redundant zeros at the beginnings of their encodings)::

                   offset data
        --------------------------
             ^      0     0x01
             |      32    0x40 <-- Offset of object A in global frame (64)
        -----|--------------------
        Global frame ^      64    0x02 <-- Beginning of object A (64 w/offset 0 = 64)
             |       |      96    0x40 <-- Offset of object B in frame of object A (64)
        -----|-Object A's frame---
             |       |      128   0x02 <-- Beginning of object B (64 w/offset 64 = 128)
             |       |      160   0x03
             v       v      192   0x03
        --------------------------

    Note that the offset of object B is encoded as 64 which only specifies the
    beginning of its encoded value relative to the beginning of object A's
    encoding. Globally, object B is located at offset 128. In order to make
    sense out of object B's offset, it needs to be positioned in the context of
    its enclosing object's frame (object A).
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Stack of (frame_offset, return_position) pairs, innermost last.
        self._frames = []
        # Sum of all frame offsets currently on the stack.
        self._total_offset = 0

    def seek_in_frame(self, pos, *args, **kwargs):
        """
        Seeks relative to the total offset of the current contextual frames.
        """
        self.seek(self._total_offset + pos, *args, **kwargs)

    def push_frame(self, offset):
        """
        Pushes a new contextual frame onto the stack with the given offset and a
        return position at the current cursor position then seeks to the new
        total offset.
        """
        self._frames.append((offset, self.tell()))
        self._total_offset += offset
        self.seek_in_frame(0)

    def pop_frame(self):
        """
        Pops the current contextual frame off of the stack and returns the
        cursor to the frame's return position.
        """
        try:
            offset, return_pos = self._frames.pop()
        except IndexError:
            # Re-raise with a clearer message; popping with no frames is a bug.
            raise IndexError("no frames to pop")
        self._total_offset -= offset
        self.seek(return_pos)
class BaseDecoder(BaseCoder, metaclass=abc.ABCMeta):
    """
    Base class for all decoder classes. Subclass this if you want to define a
    custom decoder class. Subclasses must also implement
    :any:`BaseCoder.from_type_str`.
    """
    @abc.abstractmethod
    def decode(self, stream: ContextFramesBytesIO) -> Any: # pragma: no cover
        """
        Decodes the given stream of bytes into a python value. Should raise
        :any:`exceptions.DecodingError` if a python value cannot be decoded
        from the given byte stream.
        """
        pass

    def __call__(self, stream: ContextFramesBytesIO) -> Any:
        # Allow decoder instances to be used as plain callables.
        return self.decode(stream)
class HeadTailDecoder(BaseDecoder):
    """
    Decoder for a dynamic element referenced from a head section: reads a
    uint256 offset, pushes a frame at that offset, decodes the value there
    with ``tail_decoder``, then restores the cursor.
    """
    is_dynamic = True
    # Decoder applied to the value found at the tail offset; must be set.
    tail_decoder = None

    def validate(self):
        super().validate()
        if self.tail_decoder is None:
            raise ValueError("No `tail_decoder` set")

    def decode(self, stream):
        # Offset of the actual value, relative to the enclosing frame.
        start_pos = decode_uint_256(stream)
        stream.push_frame(start_pos)
        value = self.tail_decoder(stream)
        stream.pop_frame()
        return value
class TupleDecoder(BaseDecoder):
    """Decodes a heterogeneous sequence using one sub-decoder per component."""
    decoders = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Dynamic components are reached via a head-section offset, so wrap
        # them in HeadTailDecoder; static ones decode inline.
        self.decoders = tuple(
            HeadTailDecoder(tail_decoder=d) if getattr(d, "is_dynamic", False) else d
            for d in self.decoders
        )
        self.is_dynamic = any(getattr(d, "is_dynamic", False) for d in self.decoders)

    def validate(self):
        super().validate()
        if self.decoders is None:
            raise ValueError("No `decoders` set")

    @to_tuple
    def decode(self, stream):
        for decoder in self.decoders:
            yield decoder(stream)

    @parse_tuple_type_str
    def from_type_str(cls, abi_type, registry):
        decoders = tuple(
            registry.get_decoder(c.to_type_str()) for c in abi_type.components
        )
        return cls(decoders=decoders)
class SingleDecoder(BaseDecoder):
    """
    Template for decoders of single values: read raw bytes from the stream,
    split off padding, decode with ``decoder_fn``, then validate the padding.
    """
    decoder_fn = None

    def validate(self):
        super().validate()
        if self.decoder_fn is None:
            raise ValueError("No `decoder_fn` set")

    def validate_padding_bytes(self, value, padding_bytes):
        raise NotImplementedError("Must be implemented by subclasses")

    def decode(self, stream):
        raw_data = self.read_data_from_stream(stream)
        data, padding_bytes = self.split_data_and_padding(raw_data)
        value = self.decoder_fn(data)
        self.validate_padding_bytes(value, padding_bytes)
        return value

    def read_data_from_stream(self, stream):
        raise NotImplementedError("Must be implemented by subclasses")

    def split_data_and_padding(self, raw_data):
        # Default: everything is data, no padding.
        return raw_data, b""
class BaseArrayDecoder(BaseDecoder):
    """Shared base for sized and dynamic array decoders."""
    # Decoder applied to each element; must be set.
    item_decoder = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Use a head-tail decoder to decode dynamic elements
        if self.item_decoder.is_dynamic:
            self.item_decoder = HeadTailDecoder(
                tail_decoder=self.item_decoder,
            )

    def validate(self):
        super().validate()
        if self.item_decoder is None:
            raise ValueError("No `item_decoder` set")

    @parse_type_str(with_arrlist=True)
    def from_type_str(cls, abi_type, registry):
        item_decoder = registry.get_decoder(abi_type.item_type.to_type_str())
        # Only the outermost dimension matters here; inner dimensions were
        # folded into item_decoder by the registry lookup above.
        array_spec = abi_type.arrlist[-1]
        if len(array_spec) == 1:
            # If array dimension is fixed
            return SizedArrayDecoder(
                array_size=array_spec[0],
                item_decoder=item_decoder,
            )
        else:
            # If array dimension is dynamic
            return DynamicArrayDecoder(item_decoder=item_decoder)
class SizedArrayDecoder(BaseArrayDecoder):
    """Decoder for fixed-length arrays (``T[N]``): reads exactly ``array_size`` items."""
    array_size = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # A fixed-size array is dynamic iff its element type is dynamic.
        self.is_dynamic = self.item_decoder.is_dynamic

    @to_tuple
    def decode(self, stream):
        for _ in range(self.array_size):
            yield self.item_decoder(stream)
class DynamicArrayDecoder(BaseArrayDecoder):
    # Dynamic arrays are always dynamic, regardless of their elements
    is_dynamic = True

    @to_tuple
    def decode(self, stream):
        # uint256 length prefix, then the elements in a frame that starts
        # just past the prefix (offsets inside are relative to it).
        array_size = decode_uint_256(stream)
        stream.push_frame(32)
        for _ in range(array_size):
            yield self.item_decoder(stream)
        stream.pop_frame()
class FixedByteSizeDecoder(SingleDecoder):
    """Decoder for values stored in a fixed-width slot of ``data_byte_size`` bytes."""
    decoder_fn = None
    value_bit_size = None
    data_byte_size = None
    is_big_endian = None

    def validate(self):
        super().validate()
        if self.value_bit_size is None:
            raise ValueError("`value_bit_size` may not be None")
        if self.data_byte_size is None:
            raise ValueError("`data_byte_size` may not be None")
        if self.decoder_fn is None:
            raise ValueError("`decoder_fn` may not be None")
        if self.is_big_endian is None:
            raise ValueError("`is_big_endian` may not be None")
        if self.value_bit_size % 8 != 0:
            raise ValueError(
                "Invalid value bit size: {0}. Must be a multiple of 8".format(
                    self.value_bit_size,
                )
            )
        if self.value_bit_size > self.data_byte_size * 8:
            raise ValueError("Value byte size exceeds data size")

    def read_data_from_stream(self, stream):
        data = stream.read(self.data_byte_size)
        if len(data) != self.data_byte_size:
            raise InsufficientDataBytes(
                "Tried to read {0} bytes. Only got {1} bytes".format(
                    self.data_byte_size,
                    len(data),
                )
            )
        return data

    def split_data_and_padding(self, raw_data):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size
        if self.is_big_endian:
            # Big-endian values sit at the end of the slot; padding leads.
            padding_bytes = raw_data[:padding_size]
            data = raw_data[padding_size:]
        else:
            # Otherwise the value leads and the padding trails.
            data = raw_data[:value_byte_size]
            padding_bytes = raw_data[value_byte_size:]
        return data, padding_bytes

    def validate_padding_bytes(self, value, padding_bytes):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size
        if padding_bytes != b"\x00" * padding_size:
            raise NonEmptyPaddingBytes(
                "Padding bytes were not empty: {0}".format(repr(padding_bytes))
            )

    def _get_value_byte_size(self):
        # Width of the meaningful value portion within the slot.
        value_byte_size = self.value_bit_size // 8
        return value_byte_size
class Fixed32ByteSizeDecoder(FixedByteSizeDecoder):
    """Fixed-size decoder specialized to the standard 32-byte ABI slot."""
    data_byte_size = 32
class BooleanDecoder(Fixed32ByteSizeDecoder):
    # One meaningful byte, left-padded to the 32-byte slot.
    value_bit_size = 8
    is_big_endian = True

    @staticmethod
    def decoder_fn(data):
        if data == b"\x00":
            return False
        elif data == b"\x01":
            return True
        else:
            # Strict ABI: any other byte value is rejected.
            raise NonEmptyPaddingBytes(
                "Boolean must be either 0x0 or 0x1. Got: {0}".format(repr(data))
            )

    @parse_type_str("bool")
    def from_type_str(cls, abi_type, registry):
        return cls()
class AddressDecoder(Fixed32ByteSizeDecoder):
    # 20-byte address, left-padded to the 32-byte slot.
    value_bit_size = 20 * 8
    is_big_endian = True
    # Raw 20 bytes are normalized via the utils helper.
    decoder_fn = staticmethod(to_normalized_address)

    @parse_type_str("address")
    def from_type_str(cls, abi_type, registry):
        return cls()
#
# Unsigned Integer Decoders
#
class UnsignedIntegerDecoder(Fixed32ByteSizeDecoder):
    """Decoder for ``uintN`` values; width comes from the type's bit-size sub."""
    decoder_fn = staticmethod(big_endian_to_int)
    is_big_endian = True

    @parse_type_str("uint")
    def from_type_str(cls, abi_type, registry):
        return cls(value_bit_size=abi_type.sub)


# Shared instance used throughout this module for offsets and length prefixes.
decode_uint_256 = UnsignedIntegerDecoder(value_bit_size=256)
#
# Signed Integer Decoders
#
class SignedIntegerDecoder(Fixed32ByteSizeDecoder):
    """Decoder for ``intN`` values stored as two's complement."""
    is_big_endian = True

    def decoder_fn(self, data):
        value = big_endian_to_int(data)
        # Two's complement: values at or above 2**(N-1) are negative.
        if value >= 2 ** (self.value_bit_size - 1):
            return value - 2**self.value_bit_size
        else:
            return value

    def validate_padding_bytes(self, value, padding_bytes):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size
        # Sign extension: negative values must be padded with 0xff, not 0x00.
        if value >= 0:
            expected_padding_bytes = b"\x00" * padding_size
        else:
            expected_padding_bytes = b"\xff" * padding_size
        if padding_bytes != expected_padding_bytes:
            raise NonEmptyPaddingBytes(
                "Padding bytes were not empty: {0}".format(repr(padding_bytes))
            )

    @parse_type_str("int")
    def from_type_str(cls, abi_type, registry):
        return cls(value_bit_size=abi_type.sub)
#
# Bytes1..32
#
class BytesDecoder(Fixed32ByteSizeDecoder):
    """Decoder for fixed-size ``bytesN``; value leads the slot, padding trails."""
    is_big_endian = False

    @staticmethod
    def decoder_fn(data):
        # The raw bytes are the value; no conversion needed.
        return data

    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        # ``sub`` is the byte count N of bytesN.
        return cls(value_bit_size=abi_type.sub * 8)
class BaseFixedDecoder(Fixed32ByteSizeDecoder):
    """Shared base for fixed-point decoders; ``frac_places`` is the decimal scale."""
    frac_places = None
    is_big_endian = True

    def validate(self):
        super().validate()
        if self.frac_places is None:
            raise ValueError("must specify `frac_places`")
        if self.frac_places <= 0 or self.frac_places > 80:
            raise ValueError("`frac_places` must be in range (0, 80]")
class UnsignedFixedDecoder(BaseFixedDecoder):
    """Decoder for ``ufixedMxN``: unsigned integer scaled down by 10**N."""
    def decoder_fn(self, data):
        value = big_endian_to_int(data)
        # Divide under the dedicated decimal context to control precision.
        with decimal.localcontext(abi_decimal_context):
            decimal_value = decimal.Decimal(value) / TEN**self.frac_places
        return decimal_value

    @parse_type_str("ufixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub
        return cls(value_bit_size=value_bit_size, frac_places=frac_places)
class SignedFixedDecoder(BaseFixedDecoder):
    """Decoder for ``fixedMxN``: two's complement integer scaled down by 10**N."""
    def decoder_fn(self, data):
        value = big_endian_to_int(data)
        # Two's complement: values at or above 2**(N-1) are negative.
        if value >= 2 ** (self.value_bit_size - 1):
            signed_value = value - 2**self.value_bit_size
        else:
            signed_value = value
        # Divide under the dedicated decimal context to control precision.
        with decimal.localcontext(abi_decimal_context):
            decimal_value = decimal.Decimal(signed_value) / TEN**self.frac_places
        return decimal_value

    def validate_padding_bytes(self, value, padding_bytes):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size
        # Sign extension: negative values must be padded with 0xff, not 0x00.
        if value >= 0:
            expected_padding_bytes = b"\x00" * padding_size
        else:
            expected_padding_bytes = b"\xff" * padding_size
        if padding_bytes != expected_padding_bytes:
            raise NonEmptyPaddingBytes(
                "Padding bytes were not empty: {0}".format(repr(padding_bytes))
            )

    @parse_type_str("fixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub
        return cls(value_bit_size=value_bit_size, frac_places=frac_places)
#
# String and Bytes
#
class ByteStringDecoder(SingleDecoder):
    # Length-prefixed, so the encoded form is variable-length.
    is_dynamic = True

    @staticmethod
    def decoder_fn(data):
        # The raw bytes are the value; no conversion needed.
        return data

    @staticmethod
    def read_data_from_stream(stream):
        # uint256 length prefix, then the payload padded to a 32-byte boundary.
        data_length = decode_uint_256(stream)
        padded_length = ceil32(data_length)
        data = stream.read(padded_length)
        if len(data) < padded_length:
            raise InsufficientDataBytes(
                "Tried to read {0} bytes. Only got {1} bytes".format(
                    padded_length,
                    len(data),
                )
            )
        padding_bytes = data[data_length:]
        if padding_bytes != b"\x00" * (padded_length - data_length):
            raise NonEmptyPaddingBytes(
                "Padding bytes were not empty: {0}".format(repr(padding_bytes))
            )
        return data[:data_length]

    def validate_padding_bytes(self, value, padding_bytes):
        # Padding is already validated inside read_data_from_stream.
        pass

    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        return cls()
class StringDecoder(ByteStringDecoder):
    """Like ByteStringDecoder, but decodes the payload as UTF-8 text."""
    @parse_type_str("string")
    def from_type_str(cls, abi_type, registry):
        return cls()

    @staticmethod
    def decoder_fn(data):
        try:
            value = data.decode("utf-8")
        except UnicodeDecodeError as e:
            # NOTE(review): DecodingError is passed UnicodeDecodeError-style
            # positional args here — confirm its __init__ accepts this shape.
            raise DecodingError(
                e.encoding,
                e.object,
                e.start,
                e.end,
                "The returned type for this function is string which is "
                "expected to be a UTF8 encoded string of text. The returned "
                "value could not be decoded as valid UTF8. This is indicative "
                "of a broken application which is using incorrect return types for "
                "binary data.",
            ) from e
        return value

View File

@@ -0,0 +1,720 @@
import abc
import codecs
import decimal
from itertools import (
accumulate,
)
from typing import (
Any,
Optional,
Type,
)
from ..utils import (
int_to_big_endian,
is_address,
is_boolean,
is_bytes,
is_integer,
is_list_like,
is_number,
is_text,
to_canonical_address,
)
from .base import (
BaseCoder,
parse_tuple_type_str,
parse_type_str,
)
from .exceptions import (
EncodingTypeError,
IllegalValue,
ValueOutOfBounds,
)
from .utils.numeric import (
TEN,
abi_decimal_context,
ceil32,
compute_signed_fixed_bounds,
compute_signed_integer_bounds,
compute_unsigned_fixed_bounds,
compute_unsigned_integer_bounds,
)
from .utils.padding import (
fpad,
zpad,
zpad_right,
)
from .utils.string import (
abbr,
)
class BaseEncoder(BaseCoder, metaclass=abc.ABCMeta):
    """
    Base class for all encoder classes. Subclass this if you want to define a
    custom encoder class. Subclasses must also implement
    :any:`BaseCoder.from_type_str`.
    """
    @abc.abstractmethod
    def encode(self, value: Any) -> bytes: # pragma: no cover
        """
        Encodes the given value as a sequence of bytes. Should raise
        :any:`exceptions.EncodingError` if ``value`` cannot be encoded.
        """
        pass

    @abc.abstractmethod
    def validate_value(self, value: Any) -> None: # pragma: no cover
        """
        Checks whether or not the given value can be encoded by this encoder.
        If the given value cannot be encoded, must raise
        :any:`exceptions.EncodingError`.
        """
        pass

    @classmethod
    def invalidate_value(
        cls,
        value: Any,
        exc: Type[Exception] = EncodingTypeError,
        msg: Optional[str] = None,
    ) -> None:
        """
        Throws a standard exception for when a value is not encodable by an
        encoder.

        :param value: The offending value (abbreviated in the message).
        :param exc: Exception class to raise; defaults to EncodingTypeError.
        :param msg: Optional detail appended to the standard message.
        """
        raise exc(
            "Value `{rep}` of type {typ} cannot be encoded by {cls}{msg}".format(
                rep=abbr(value),
                typ=type(value),
                cls=cls.__name__,
                msg="" if msg is None else (": " + msg),
            )
        )

    def __call__(self, value: Any) -> bytes:
        # Allow encoder instances to be used as plain callables.
        return self.encode(value)
class TupleEncoder(BaseEncoder):
    """Encodes a heterogeneous sequence using one sub-encoder per component."""
    encoders = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.is_dynamic = any(getattr(e, "is_dynamic", False) for e in self.encoders)

    def validate(self):
        super().validate()
        if self.encoders is None:
            raise ValueError("`encoders` may not be none")

    def validate_value(self, value):
        if not is_list_like(value):
            self.invalidate_value(
                value,
                msg="must be list-like object such as array or tuple",
            )
        if len(value) != len(self.encoders):
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg="value has {} items when {} were expected".format(
                    len(value),
                    len(self.encoders),
                ),
            )
        for item, encoder in zip(value, self.encoders):
            try:
                encoder.validate_value(item)
            except AttributeError:
                # Encoder has no validate_value (e.g. a plain callable); fall
                # back to attempting a full encode.
                encoder(item)

    def encode(self, values):
        self.validate_value(values)
        raw_head_chunks = []
        tail_chunks = []
        # Head-tail mechanism: static items are encoded inline in the head;
        # dynamic items contribute a placeholder (filled with an offset below)
        # and their encoding goes in the tail.
        for value, encoder in zip(values, self.encoders):
            if getattr(encoder, "is_dynamic", False):
                raw_head_chunks.append(None)
                tail_chunks.append(encoder(value))
            else:
                raw_head_chunks.append(encoder(value))
                tail_chunks.append(b"")
        # Each placeholder occupies a 32-byte offset slot in the head.
        head_length = sum(32 if item is None else len(item) for item in raw_head_chunks)
        tail_offsets = (0,) + tuple(accumulate(map(len, tail_chunks[:-1])))
        head_chunks = tuple(
            encode_uint_256(head_length + offset) if chunk is None else chunk
            for chunk, offset in zip(raw_head_chunks, tail_offsets)
        )
        encoded_value = b"".join(head_chunks + tuple(tail_chunks))
        return encoded_value

    @parse_tuple_type_str
    def from_type_str(cls, abi_type, registry):
        encoders = tuple(
            registry.get_encoder(c.to_type_str()) for c in abi_type.components
        )
        return cls(encoders=encoders)
class FixedSizeEncoder(BaseEncoder):
    """
    Base encoder for statically sized values: a `value_bit_size`-bit value
    padded to `data_byte_size` bytes.
    """

    value_bit_size = None
    data_byte_size = None
    # Callable producing the raw (unpadded) byte representation of a value.
    encode_fn = None
    type_check_fn = None
    is_big_endian = None

    def validate(self):
        super().validate()
        if self.value_bit_size is None:
            raise ValueError("`value_bit_size` may not be none")
        if self.data_byte_size is None:
            raise ValueError("`data_byte_size` may not be none")
        if self.encode_fn is None:
            raise ValueError("`encode_fn` may not be none")
        if self.is_big_endian is None:
            raise ValueError("`is_big_endian` may not be none")
        if self.value_bit_size % 8 != 0:
            raise ValueError(
                "Invalid value bit size: {0}. Must be a multiple of 8".format(
                    self.value_bit_size,
                )
            )
        if self.value_bit_size > self.data_byte_size * 8:
            raise ValueError("Value byte size exceeds data size")

    def validate_value(self, value):
        raise NotImplementedError("Must be implemented by subclasses")

    def encode(self, value):
        self.validate_value(value)
        base_encoded_value = self.encode_fn(value)
        # Big-endian values are left-padded with zero bytes; little-endian
        # values (e.g. fixed-width bytes) are right-padded.
        if self.is_big_endian:
            padded_encoded_value = zpad(base_encoded_value, self.data_byte_size)
        else:
            padded_encoded_value = zpad_right(base_encoded_value, self.data_byte_size)
        return padded_encoded_value
class Fixed32ByteSizeEncoder(FixedSizeEncoder):
    # Standard (non-packed) ABI encoding always pads to a full 32-byte word.
    data_byte_size = 32
class BooleanEncoder(Fixed32ByteSizeEncoder):
    """
    Encodes ``bool`` values as a single byte (0x00 or 0x01), zero-padded to
    the data byte size.
    """

    value_bit_size = 8
    is_big_endian = True

    @classmethod
    def validate_value(cls, value):
        if not is_boolean(value):
            cls.invalidate_value(value)

    @classmethod
    def encode_fn(cls, value):
        if value is True:
            return b"\x01"
        elif value is False:
            return b"\x00"
        else:
            # `validate_value` guarantees a bool; reaching here is a bug.
            raise ValueError("Invariant")

    @parse_type_str("bool")
    def from_type_str(cls, abi_type, registry):
        return cls()
class PackedBooleanEncoder(BooleanEncoder):
    # Packed encoding uses exactly one byte with no word padding.
    data_byte_size = 1
class NumberEncoder(Fixed32ByteSizeEncoder):
    """
    Base encoder for numeric types. Validates python type, legality and
    numeric bounds of a value before encoding.
    """

    is_big_endian = True
    # Callable returning the inclusive (lower, upper) bounds for a bit size.
    bounds_fn = None
    # Optional callable flagging values that are illegal regardless of bounds.
    illegal_value_fn = None
    # Callable checking that the python type of the value is acceptable.
    type_check_fn = None

    def validate(self):
        super().validate()
        if self.bounds_fn is None:
            raise ValueError("`bounds_fn` cannot be null")
        if self.type_check_fn is None:
            raise ValueError("`type_check_fn` cannot be null")

    def validate_value(self, value):
        if not self.type_check_fn(value):
            self.invalidate_value(value)
        illegal_value = self.illegal_value_fn is not None and self.illegal_value_fn(
            value
        )
        if illegal_value:
            self.invalidate_value(value, exc=IllegalValue)
        lower_bound, upper_bound = self.bounds_fn(self.value_bit_size)
        if value < lower_bound or value > upper_bound:
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg=(
                    "Cannot be encoded in {} bits. Must be bounded "
                    "between [{}, {}].".format(
                        self.value_bit_size,
                        lower_bound,
                        upper_bound,
                    )
                ),
            )
class UnsignedIntegerEncoder(NumberEncoder):
    """Encodes ``uintN`` values, big-endian, zero-padded."""

    encode_fn = staticmethod(int_to_big_endian)
    bounds_fn = staticmethod(compute_unsigned_integer_bounds)
    type_check_fn = staticmethod(is_integer)

    @parse_type_str("uint")
    def from_type_str(cls, abi_type, registry):
        return cls(value_bit_size=abi_type.sub)


# Shared instance used throughout this module for encoding lengths and
# offsets (which are always uint256).
encode_uint_256 = UnsignedIntegerEncoder(value_bit_size=256, data_byte_size=32)
class PackedUnsignedIntegerEncoder(UnsignedIntegerEncoder):
    """Packed-mode ``uintN``: padded only to the type's own byte width."""

    @parse_type_str("uint")
    def from_type_str(cls, abi_type, registry):
        return cls(
            value_bit_size=abi_type.sub,
            data_byte_size=abi_type.sub // 8,
        )
class SignedIntegerEncoder(NumberEncoder):
    """Encodes ``intN`` values using two's complement."""

    bounds_fn = staticmethod(compute_signed_integer_bounds)
    type_check_fn = staticmethod(is_integer)

    def encode_fn(self, value):
        # Two's complement representation within `value_bit_size` bits.
        return int_to_big_endian(value % (2**self.value_bit_size))

    def encode(self, value):
        self.validate_value(value)
        base_encoded_value = self.encode_fn(value)
        # Sign-extend: 0x00 padding for non-negative, 0xff for negative.
        if value >= 0:
            padded_encoded_value = zpad(base_encoded_value, self.data_byte_size)
        else:
            padded_encoded_value = fpad(base_encoded_value, self.data_byte_size)
        return padded_encoded_value

    @parse_type_str("int")
    def from_type_str(cls, abi_type, registry):
        return cls(value_bit_size=abi_type.sub)
class PackedSignedIntegerEncoder(SignedIntegerEncoder):
    """Packed-mode ``intN``: padded only to the type's own byte width."""

    @parse_type_str("int")
    def from_type_str(cls, abi_type, registry):
        return cls(
            value_bit_size=abi_type.sub,
            data_byte_size=abi_type.sub // 8,
        )
class BaseFixedEncoder(NumberEncoder):
    """
    Base encoder for ``fixedMxN``/``ufixedMxN`` decimal values.

    Values must be non-float numbers (e.g. ``int`` or ``decimal.Decimal``)
    and must not carry more fractional digits than ``frac_places`` allows.
    """

    # Number of decimal fractional digits supported by the target type.
    frac_places = None

    @staticmethod
    def type_check_fn(value):
        # Floats are rejected: binary floats cannot represent decimal
        # fractions exactly, which would corrupt the encoding.
        return is_number(value) and not isinstance(value, float)

    @staticmethod
    def illegal_value_fn(value):
        # NaN and infinities have no fixed-point representation.
        if isinstance(value, decimal.Decimal):
            return value.is_nan() or value.is_infinite()
        return False

    def validate_value(self, value):
        super().validate_value(value)
        with decimal.localcontext(abi_decimal_context):
            residue = value % (TEN**-self.frac_places)
        # BUGFIX: `decimal.Decimal.__mod__` returns a remainder carrying the
        # sign of the *dividend*, so negative values with excess fractional
        # precision yield a negative residue. The previous `residue > 0`
        # check let such values through, to be silently truncated by `int()`
        # during encoding. Compare with `!=` so they are rejected too.
        if residue != 0:
            self.invalidate_value(
                value,
                exc=IllegalValue,
                msg="residue {} outside allowed fractional precision of {}".format(
                    repr(residue),
                    self.frac_places,
                ),
            )

    def validate(self):
        super().validate()
        if self.frac_places is None:
            raise ValueError("must specify `frac_places`")
        if self.frac_places <= 0 or self.frac_places > 80:
            raise ValueError("`frac_places` must be in range (0, 80]")
class UnsignedFixedEncoder(BaseFixedEncoder):
    """Encodes ``ufixedMxN`` values."""

    def bounds_fn(self, value_bit_size):
        # NOTE: the argument is ignored in favor of instance attributes,
        # since the bounds depend on `frac_places` as well.
        return compute_unsigned_fixed_bounds(self.value_bit_size, self.frac_places)

    def encode_fn(self, value):
        with decimal.localcontext(abi_decimal_context):
            # Scale the decimal into an integer count of 10**-frac_places units.
            scaled_value = value * TEN**self.frac_places
            integer_value = int(scaled_value)
        return int_to_big_endian(integer_value)

    @parse_type_str("ufixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub
        return cls(
            value_bit_size=value_bit_size,
            frac_places=frac_places,
        )
class PackedUnsignedFixedEncoder(UnsignedFixedEncoder):
    """Packed-mode ``ufixedMxN``: padded only to the type's own byte width."""

    @parse_type_str("ufixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub
        return cls(
            value_bit_size=value_bit_size,
            data_byte_size=value_bit_size // 8,
            frac_places=frac_places,
        )
class SignedFixedEncoder(BaseFixedEncoder):
    """Encodes ``fixedMxN`` values using two's complement."""

    def bounds_fn(self, value_bit_size):
        # NOTE: the argument is ignored in favor of instance attributes,
        # since the bounds depend on `frac_places` as well.
        return compute_signed_fixed_bounds(self.value_bit_size, self.frac_places)

    def encode_fn(self, value):
        with decimal.localcontext(abi_decimal_context):
            # Scale into integer units of 10**-frac_places.
            scaled_value = value * TEN**self.frac_places
            integer_value = int(scaled_value)
        # Two's complement representation within `value_bit_size` bits.
        unsigned_integer_value = integer_value % (2**self.value_bit_size)
        return int_to_big_endian(unsigned_integer_value)

    def encode(self, value):
        self.validate_value(value)
        base_encoded_value = self.encode_fn(value)
        # Sign-extend: 0x00 padding for non-negative, 0xff for negative.
        if value >= 0:
            padded_encoded_value = zpad(base_encoded_value, self.data_byte_size)
        else:
            padded_encoded_value = fpad(base_encoded_value, self.data_byte_size)
        return padded_encoded_value

    @parse_type_str("fixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub
        return cls(
            value_bit_size=value_bit_size,
            frac_places=frac_places,
        )
class PackedSignedFixedEncoder(SignedFixedEncoder):
    """Packed-mode ``fixedMxN``: padded only to the type's own byte width."""

    @parse_type_str("fixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub
        return cls(
            value_bit_size=value_bit_size,
            data_byte_size=value_bit_size // 8,
            frac_places=frac_places,
        )
class AddressEncoder(Fixed32ByteSizeEncoder):
    """Encodes 20-byte addresses, left-padded to the 32-byte word."""

    value_bit_size = 20 * 8
    encode_fn = staticmethod(to_canonical_address)
    is_big_endian = True

    @classmethod
    def validate_value(cls, value):
        if not is_address(value):
            cls.invalidate_value(value)

    def validate(self):
        super().validate()
        # The bit size is fixed by the ABI spec and may not be overridden.
        if self.value_bit_size != 20 * 8:
            raise ValueError("Addresses must be 160 bits in length")

    @parse_type_str("address")
    def from_type_str(cls, abi_type, registry):
        return cls()
class PackedAddressEncoder(AddressEncoder):
    # Packed encoding uses exactly the 20 address bytes with no padding.
    data_byte_size = 20
class BytesEncoder(Fixed32ByteSizeEncoder):
    """Encoder for fixed-size ``bytesN`` types, right-padded with zeros."""

    is_big_endian = False

    def validate_value(self, value):
        if not is_bytes(value):
            self.invalidate_value(value)
        byte_size = self.value_bit_size // 8
        # Shorter payloads are allowed (they get zero-padded); longer are not.
        if len(value) > byte_size:
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg="exceeds total byte size for bytes{} encoding".format(byte_size),
            )

    @staticmethod
    def encode_fn(value):
        # Bytes are stored verbatim; padding is applied by `encode`.
        return value

    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        return cls(value_bit_size=abi_type.sub * 8)
class PackedBytesEncoder(BytesEncoder):
    """Packed-mode ``bytesN``: padded only to N bytes rather than 32."""

    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        return cls(
            value_bit_size=abi_type.sub * 8,
            data_byte_size=abi_type.sub,
        )
class ByteStringEncoder(BaseEncoder):
    """
    Encoder for the dynamic ``bytes`` type: a uint256 length prefix followed
    by the payload right-padded up to a 32-byte boundary.
    """

    is_dynamic = True

    @classmethod
    def validate_value(cls, value):
        if not is_bytes(value):
            cls.invalidate_value(value)

    @classmethod
    def encode(cls, value):
        cls.validate_value(value)
        length = len(value)
        # An empty byte string still occupies one zeroed 32-byte data word.
        padded_value = zpad_right(value, ceil32(length)) if value else b"\x00" * 32
        return encode_uint_256(length) + padded_value

    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        return cls()
class PackedByteStringEncoder(ByteStringEncoder):
    """Packed-mode ``bytes``: raw payload, no length prefix or padding."""

    is_dynamic = False

    @classmethod
    def encode(cls, value):
        cls.validate_value(value)
        return value
class TextStringEncoder(BaseEncoder):
    """
    Encoder for the dynamic ``string`` type: UTF-8 encodes the text, then
    uses the same length-prefixed layout as ``bytes``.
    """

    is_dynamic = True

    @classmethod
    def validate_value(cls, value):
        if not is_text(value):
            cls.invalidate_value(value)

    @classmethod
    def encode(cls, value):
        cls.validate_value(value)
        value_as_bytes = codecs.encode(value, "utf8")
        if not value_as_bytes:
            # An empty string still occupies one zeroed 32-byte data word.
            padded_value = b"\x00" * 32
        else:
            padded_value = zpad_right(value_as_bytes, ceil32(len(value_as_bytes)))
        # Length prefix counts UTF-8 bytes, not characters.
        encoded_size = encode_uint_256(len(value_as_bytes))
        encoded_value = encoded_size + padded_value
        return encoded_value

    @parse_type_str("string")
    def from_type_str(cls, abi_type, registry):
        return cls()
class PackedTextStringEncoder(TextStringEncoder):
    """Packed-mode ``string``: raw UTF-8 bytes, no length prefix or padding."""

    is_dynamic = False

    @classmethod
    def encode(cls, value):
        cls.validate_value(value)
        return codecs.encode(value, "utf8")
class BaseArrayEncoder(BaseEncoder):
    """
    Base encoder for array types. Concrete subclasses decide how (or
    whether) the array length is represented.
    """

    # Encoder applied to every element of the array.
    item_encoder = None

    def validate(self):
        super().validate()
        if self.item_encoder is None:
            raise ValueError("`item_encoder` may not be none")

    def validate_value(self, value):
        if not is_list_like(value):
            self.invalidate_value(
                value,
                msg="must be list-like such as array or tuple",
            )
        for item in value:
            self.item_encoder.validate_value(item)

    def encode_elements(self, value):
        self.validate_value(value)
        item_encoder = self.item_encoder
        tail_chunks = tuple(item_encoder(i) for i in value)
        items_are_dynamic = getattr(item_encoder, "is_dynamic", False)
        # Static items are simply concatenated.
        if not items_are_dynamic:
            return b"".join(tail_chunks)
        # Dynamic items use head/tail layout: one 32-byte offset per item,
        # followed by the item encodings themselves.
        head_length = 32 * len(value)
        tail_offsets = (0,) + tuple(accumulate(map(len, tail_chunks[:-1])))
        head_chunks = tuple(
            encode_uint_256(head_length + offset) for offset in tail_offsets
        )
        return b"".join(head_chunks + tail_chunks)

    @parse_type_str(with_arrlist=True)
    def from_type_str(cls, abi_type, registry):
        item_encoder = registry.get_encoder(abi_type.item_type.to_type_str())
        # Only the outermost dimension is inspected here; inner dimensions
        # are handled by the recursively constructed item encoder.
        array_spec = abi_type.arrlist[-1]
        if len(array_spec) == 1:
            # If array dimension is fixed
            return SizedArrayEncoder(
                array_size=array_spec[0],
                item_encoder=item_encoder,
            )
        else:
            # If array dimension is dynamic
            return DynamicArrayEncoder(item_encoder=item_encoder)
class PackedArrayEncoder(BaseArrayEncoder):
    """
    Packed-mode array encoder: elements are concatenated without a length
    prefix. An optional fixed `array_size` is enforced during validation.
    """

    array_size = None

    def validate_value(self, value):
        super().validate_value(value)
        if self.array_size is not None and len(value) != self.array_size:
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg="value has {} items when {} were expected".format(
                    len(value),
                    self.array_size,
                ),
            )

    def encode(self, value):
        encoded_elements = self.encode_elements(value)
        return encoded_elements

    @parse_type_str(with_arrlist=True)
    def from_type_str(cls, abi_type, registry):
        item_encoder = registry.get_encoder(abi_type.item_type.to_type_str())
        array_spec = abi_type.arrlist[-1]
        # A one-element spec means a fixed dimension; empty means dynamic.
        if len(array_spec) == 1:
            return cls(
                array_size=array_spec[0],
                item_encoder=item_encoder,
            )
        else:
            return cls(item_encoder=item_encoder)
class SizedArrayEncoder(BaseArrayEncoder):
    """Encoder for fixed-length arrays (``T[N]``): no length prefix."""

    array_size = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # A fixed-size array is dynamic iff its items are dynamic.
        self.is_dynamic = self.item_encoder.is_dynamic

    def validate(self):
        super().validate()
        if self.array_size is None:
            raise ValueError("`array_size` may not be none")

    def validate_value(self, value):
        super().validate_value(value)
        if len(value) != self.array_size:
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg="value has {} items when {} were expected".format(
                    len(value),
                    self.array_size,
                ),
            )

    def encode(self, value):
        encoded_elements = self.encode_elements(value)
        return encoded_elements
class DynamicArrayEncoder(BaseArrayEncoder):
    """Encoder for dynamic arrays (``T[]``): uint256 count + elements."""

    is_dynamic = True

    def encode(self, value):
        encoded_size = encode_uint_256(len(value))
        encoded_elements = self.encode_elements(value)
        encoded_value = encoded_size + encoded_elements
        return encoded_value

View File

@@ -0,0 +1,139 @@
from ...parsimonious import (
ParseError
)
class EncodingError(Exception):
    """
    Base exception for any error that occurs during encoding.

    Direct subclasses include :class:`EncodingTypeError` and
    :class:`IllegalValue`.
    """

    pass
class EncodingTypeError(EncodingError):
    """
    Raised when trying to encode a python value whose type is not supported for
    the output ABI type. This is the default exception raised by
    ``BaseEncoder.invalidate_value``.
    """

    pass
class IllegalValue(EncodingError):
    """
    Raised when trying to encode a python value with the correct type but with
    a value that is not considered legal for the output ABI type.

    Example:

    .. code-block:: python

        fixed128x19_encoder(Decimal('NaN'))  # cannot encode NaN
    """

    pass
class ValueOutOfBounds(IllegalValue):
    """
    Raised when trying to encode a python value with the correct type but with
    a value that appears outside the range of valid values for the output ABI
    type. Subclass of :class:`IllegalValue`.

    Example:

    .. code-block:: python

        ufixed8x1_encoder(Decimal('25.6'))  # out of bounds
    """

    pass
class DecodingError(Exception):
    """
    Base exception for any error that occurs during decoding.

    Direct subclasses include :class:`InsufficientDataBytes` and
    :class:`NonEmptyPaddingBytes`.
    """

    pass
class InsufficientDataBytes(DecodingError):
    """
    Raised when there are insufficient data to decode a value for a given ABI
    type, i.e. the byte stream ends before the expected amount was read.
    """

    pass
class NonEmptyPaddingBytes(DecodingError):
    """
    Raised when the padding bytes of an ABI value are malformed, i.e. they
    contain non-zero (or non-sign-extension) data.
    """

    pass
class ParseError(ParseError):
    """
    Raised when an ABI type string cannot be parsed.

    Intentionally subclasses (and shadows) parsimonious's ``ParseError`` so
    callers may catch either class.
    """

    def __str__(self):
        # Show a short excerpt of the type string at the failure position.
        return "Parse error at '{}' (column {}) in type string '{}'".format(
            self.text[self.pos : self.pos + 5],
            self.column(),
            self.text,
        )
class ABITypeError(ValueError):
    """
    Raised when a parsed ABI type has inconsistent properties; for example,
    when trying to parse the type string ``'uint7'`` (which has a bit-width
    that is not congruent with zero modulo eight).
    """

    pass
class PredicateMappingError(Exception):
    """
    Raised when an error occurs in a registry's internal mapping.

    Base class for :class:`NoEntriesFound` and :class:`MultipleEntriesFound`.
    """

    pass
# Also inherits ValueError for backwards compatibility (see warning below).
class NoEntriesFound(ValueError, PredicateMappingError):
    """
    Raised when no registration is found for a type string in a registry's
    internal mapping.

    .. warning::

        In a future version of ``eth-abi``, this error class will no longer
        inherit from ``ValueError``.
    """

    pass
# Also inherits ValueError for backwards compatibility (see warning below).
class MultipleEntriesFound(ValueError, PredicateMappingError):
    """
    Raised when multiple registrations are found for a type string in a
    registry's internal mapping. This error is non-recoverable and indicates
    that a registry was configured incorrectly. Registrations are expected to
    cover completely distinct ranges of type strings.

    .. warning::

        In a future version of ``eth-abi``, this error class will no longer
        inherit from ``ValueError``.
    """

    pass

View File

@@ -0,0 +1,443 @@
import functools
import re
from ...parsimonious import (
expressions,
ParseError,
NodeVisitor,
Grammar
)
from .exceptions import (
ABITypeError,
ParseError,
)
grammar = Grammar(
r"""
type = tuple_type / basic_type
tuple_type = components arrlist?
components = non_zero_tuple / zero_tuple
non_zero_tuple = "(" type next_type* ")"
next_type = "," type
zero_tuple = "()"
basic_type = base sub? arrlist?
base = alphas
sub = two_size / digits
two_size = (digits "x" digits)
arrlist = (const_arr / dynam_arr)+
const_arr = "[" digits "]"
dynam_arr = "[]"
alphas = ~"[A-Za-z]+"
digits = ~"[1-9][0-9]*"
"""
)
class NodeVisitor(NodeVisitor):
    """
    Parsimonious node visitor which performs both parsing of type strings and
    post-processing of parse trees. Parsing operations are cached.

    NOTE: intentionally subclasses (and shadows) the imported parsimonious
    ``NodeVisitor``.
    """

    grammar = grammar

    def visit_non_zero_tuple(self, node, visited_children):
        # Ignore left and right parens
        _, first, rest, _ = visited_children
        return (first,) + rest

    def visit_tuple_type(self, node, visited_children):
        components, arrlist = visited_children
        return TupleType(components, arrlist, node=node)

    def visit_next_type(self, node, visited_children):
        # Ignore comma
        _, abi_type = visited_children
        return abi_type

    def visit_zero_tuple(self, node, visited_children):
        return tuple()

    def visit_basic_type(self, node, visited_children):
        base, sub, arrlist = visited_children
        return BasicType(base, sub, arrlist, node=node)

    def visit_two_size(self, node, visited_children):
        # Ignore "x"
        first, _, second = visited_children
        return first, second

    def visit_const_arr(self, node, visited_children):
        # Ignore left and right brackets
        _, int_value, _ = visited_children
        return (int_value,)

    def visit_dynam_arr(self, node, visited_children):
        return tuple()

    def visit_alphas(self, node, visited_children):
        return node.text

    def visit_digits(self, node, visited_children):
        return int(node.text)

    def generic_visit(self, node, visited_children):
        if isinstance(node.expr, expressions.OneOf):
            # Unwrap value chosen from alternatives
            return visited_children[0]
        if isinstance(node.expr, expressions.Optional):
            # Unwrap optional value or return `None`
            if len(visited_children) != 0:
                return visited_children[0]
            return None
        return tuple(visited_children)

    # NOTE(review): caching on an instance method keeps every caller's
    # arguments alive; acceptable here because the module creates a single
    # shared `visitor` instance below, and keys are short strings.
    @functools.lru_cache(maxsize=None)
    def parse(self, type_str):
        """
        Parses a type string into an appropriate instance of
        :class:`~eth_abi.grammar.ABIType`.  If a type string cannot be parsed,
        throws :class:`~eth_abi.exceptions.ParseError`.

        :param type_str: The type string to be parsed.
        :returns: An instance of :class:`~eth_abi.grammar.ABIType` containing
            information about the parsed type string.
        """
        if not isinstance(type_str, str):
            raise TypeError(
                "Can only parse string values: got {}".format(type(type_str))
            )
        try:
            return super().parse(type_str)
        except ParseError as e:
            # Re-wrap parsimonious's error in this module's ParseError so the
            # friendlier __str__ is used.
            raise ParseError(e.text, e.pos, e.expr)
# Shared singleton visitor; the module-level `parse` delegates to it so that
# its parse cache is shared process-wide.
visitor = NodeVisitor()
class ABIType:
    """
    Base class for results of type string parsing operations.
    """

    __slots__ = ("arrlist", "node")

    def __init__(self, arrlist=None, node=None):
        self.arrlist = arrlist
        """
        The list of array dimensions for a parsed type.  Equal to ``None`` if
        type string has no array dimensions.
        """
        self.node = node
        """
        The parsimonious ``Node`` instance associated with this parsed type.
        Used to generate error messages for invalid types.
        """

    def __repr__(self):  # pragma: no cover
        return "<{} {}>".format(
            type(self).__qualname__,
            repr(self.to_type_str()),
        )

    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable (Python sets __hash__ to None) — confirm this is intended.
    def __eq__(self, other):
        # Two ABI types are equal if their string representations are equal
        return type(self) is type(other) and self.to_type_str() == other.to_type_str()

    def to_type_str(self):  # pragma: no cover
        """
        Returns the string representation of an ABI type.  This will be equal to
        the type string from which it was created.
        """
        raise NotImplementedError("Must implement `to_type_str`")

    @property
    def item_type(self):
        """
        If this type is an array type, equal to an appropriate
        :class:`~eth_abi.grammar.ABIType` instance for the array's items.
        """
        raise NotImplementedError("Must implement `item_type`")

    def validate(self):  # pragma: no cover
        """
        Validates the properties of an ABI type against the solidity ABI spec:

        https://solidity.readthedocs.io/en/develop/abi-spec.html

        Raises :class:`~eth_abi.exceptions.ABITypeError` if validation fails.
        """
        raise NotImplementedError("Must implement `validate`")

    def invalidate(self, error_msg):
        # Invalidates an ABI type with the given error message.  Expects that a
        # parsimonious node was provided from the original parsing operation
        # that yielded this type.
        node = self.node
        raise ABITypeError(
            "For '{comp_str}' type at column {col} "
            "in '{type_str}': {error_msg}".format(
                comp_str=node.text,
                col=node.start + 1,
                type_str=node.full_text,
                error_msg=error_msg,
            ),
        )

    @property
    def is_array(self):
        """
        Equal to ``True`` if a type is an array type (i.e. if it has an array
        dimension list).  Otherwise, equal to ``False``.
        """
        return self.arrlist is not None

    @property
    def is_dynamic(self):
        """
        Equal to ``True`` if a type has a dynamically sized encoding.
        Otherwise, equal to ``False``.
        """
        raise NotImplementedError("Must implement `is_dynamic`")

    @property
    def _has_dynamic_arrlist(self):
        # True when any array dimension is dynamic (an empty tuple).
        return self.is_array and any(len(dim) == 0 for dim in self.arrlist)
class TupleType(ABIType):
    """
    Represents the result of parsing a tuple type string e.g. "(int,bool)".
    """

    __slots__ = ("components",)

    def __init__(self, components, arrlist=None, *, node=None):
        super().__init__(arrlist, node)
        self.components = components
        """
        A tuple of :class:`~eth_abi.grammar.ABIType` instances for each of the
        tuple type's components.
        """

    def to_type_str(self):
        arrlist = self.arrlist
        if isinstance(arrlist, tuple):
            # `repr(list(...))` renders dimensions as "[2]" / "[]".
            arrlist = "".join(repr(list(a)) for a in arrlist)
        else:
            arrlist = ""
        return "({}){}".format(
            ",".join(c.to_type_str() for c in self.components),
            arrlist,
        )

    @property
    def item_type(self):
        if not self.is_array:
            raise ValueError(
                "Cannot determine item type for non-array type '{}'".format(
                    self.to_type_str(),
                )
            )
        # Drop the outermost dimension; `or None` normalizes an empty list.
        return type(self)(
            self.components,
            self.arrlist[:-1] or None,
            node=self.node,
        )

    def validate(self):
        # A tuple is valid iff all of its components are valid.
        for c in self.components:
            c.validate()

    @property
    def is_dynamic(self):
        if self._has_dynamic_arrlist:
            return True
        return any(c.is_dynamic for c in self.components)
class BasicType(ABIType):
    """
    Represents the result of parsing a basic type string e.g. "uint", "address",
    "ufixed128x19[][2]".
    """

    __slots__ = ("base", "sub")

    def __init__(self, base, sub=None, arrlist=None, *, node=None):
        super().__init__(arrlist, node)
        self.base = base
        """The base of a basic type e.g. "uint" for "uint256" etc."""
        self.sub = sub
        """
        The sub type of a basic type e.g. ``256`` for "uint256" or ``(128, 18)``
        for "ufixed128x18" etc.  Equal to ``None`` if type string has no sub
        type.
        """

    def to_type_str(self):
        sub, arrlist = self.sub, self.arrlist
        if isinstance(sub, int):
            sub = str(sub)
        elif isinstance(sub, tuple):
            # Two-part subs (fixed types) are rendered as "<bits>x<places>".
            sub = "x".join(str(s) for s in sub)
        else:
            sub = ""
        if isinstance(arrlist, tuple):
            arrlist = "".join(repr(list(a)) for a in arrlist)
        else:
            arrlist = ""
        return self.base + sub + arrlist

    @property
    def item_type(self):
        if not self.is_array:
            raise ValueError(
                "Cannot determine item type for non-array type '{}'".format(
                    self.to_type_str(),
                )
            )
        # Drop the outermost dimension; `or None` normalizes an empty list.
        return type(self)(
            self.base,
            self.sub,
            self.arrlist[:-1] or None,
            node=self.node,
        )

    @property
    def is_dynamic(self):
        if self._has_dynamic_arrlist:
            return True
        if self.base == "string":
            return True
        # "bytes" without a size suffix is the dynamic byte-string type.
        if self.base == "bytes" and self.sub is None:
            return True
        return False

    def validate(self):
        """
        Checks base/sub consistency per the solidity ABI spec; any failure
        raises ``ABITypeError`` via `invalidate`.
        """
        base, sub = self.base, self.sub
        # Check validity of string type
        if base == "string":
            if sub is not None:
                self.invalidate("string type cannot have suffix")
        # Check validity of bytes type
        elif base == "bytes":
            if not (sub is None or isinstance(sub, int)):
                self.invalidate(
                    "bytes type must have either no suffix or a numerical suffix"
                )
            if isinstance(sub, int) and sub > 32:
                self.invalidate("maximum 32 bytes for fixed-length bytes")
        # Check validity of integer type
        elif base in ("int", "uint"):
            if not isinstance(sub, int):
                self.invalidate("integer type must have numerical suffix")
            if sub < 8 or 256 < sub:
                self.invalidate("integer size out of bounds (max 256 bits)")
            if sub % 8 != 0:
                self.invalidate("integer size must be multiple of 8")
        # Check validity of fixed type
        elif base in ("fixed", "ufixed"):
            if not isinstance(sub, tuple):
                self.invalidate(
                    "fixed type must have suffix of form <bits>x<exponent>, "
                    "e.g. 128x19",
                )
            bits, minus_e = sub
            if bits < 8 or 256 < bits:
                self.invalidate("fixed size out of bounds (max 256 bits)")
            if bits % 8 != 0:
                self.invalidate("fixed size must be multiple of 8")
            if minus_e < 1 or 80 < minus_e:
                self.invalidate(
                    "fixed exponent size out of bounds, {} must be in 1-80".format(
                        minus_e,
                    ),
                )
        # Check validity of hash type
        elif base == "hash":
            if not isinstance(sub, int):
                self.invalidate("hash type must have numerical suffix")
        # Check validity of address type
        elif base == "address":
            if sub is not None:
                self.invalidate("address cannot have suffix")
# Mapping from type-string aliases to their canonical forms.
TYPE_ALIASES = {
    "int": "int256",
    "uint": "uint256",
    "fixed": "fixed128x18",
    "ufixed": "ufixed128x18",
    "function": "bytes24",
    "byte": "bytes1",
}

# Matches any alias occurring as a whole word inside a type string.
TYPE_ALIAS_RE = re.compile(
    r"\b({})\b".format("|".join(re.escape(a) for a in TYPE_ALIASES.keys()))
)
def normalize(type_str):
    """
    Normalizes a type string into its canonical version e.g. the type string
    'int' becomes 'int256', etc.

    :param type_str: The type string to be normalized.
    :returns: The canonical version of the input type string.
    """
    # Substitution callback: replace each matched alias with its canonical
    # form from the alias table.
    def _canonicalize(match):
        return TYPE_ALIASES[match.group(0)]

    return TYPE_ALIAS_RE.sub(_canonicalize, type_str)
# Public parsing entry point; bound to the shared (cached) visitor instance.
parse = visitor.parse

View File

@@ -0,0 +1,13 @@
from .codec import (
ABIEncoder,
)
from .registry import (
registry_packed,
)
# Module-level convenience encoder bound to the packed-mode registry.
default_encoder_packed = ABIEncoder(registry_packed)

# Public functional API, re-exported from the shared encoder instance.
encode_packed = default_encoder_packed.encode
is_encodable_packed = default_encoder_packed.is_encodable
encode_single_packed = default_encoder_packed.encode_single  # deprecated
encode_abi_packed = default_encoder_packed.encode_abi  # deprecated

View File

@@ -0,0 +1,643 @@
import abc
import copy
import functools
from typing import (
Any,
Callable,
Type,
Union,
)
from ..typing import (
abi,
)
from . import (
decoding,
encoding,
exceptions,
grammar,
)
from .base import (
BaseCoder,
)
from .exceptions import (
ABITypeError,
MultipleEntriesFound,
NoEntriesFound,
)
# A lookup is either an exact type string or a predicate over type strings.
Lookup = Union[abi.TypeStr, Callable[[abi.TypeStr], bool]]

EncoderCallable = Callable[[Any], bytes]
DecoderCallable = Callable[[decoding.ContextFramesBytesIO], Any]

# Coders may be registered as plain callables or as encoder/decoder classes
# (which are instantiated per type string via `from_type_str`).
Encoder = Union[EncoderCallable, Type[encoding.BaseEncoder]]
Decoder = Union[DecoderCallable, Type[decoding.BaseDecoder]]
class Copyable(abc.ABC):
    """
    Mixin routing both ``copy.copy`` and ``copy.deepcopy`` through a single
    abstract `copy` hook.
    """

    @abc.abstractmethod
    def copy(self):
        pass

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, *args):
        # Deep copies deliberately delegate to the same `copy` implementation.
        return self.copy()
class PredicateMapping(Copyable):
    """
    Acts as a mapping from predicate functions to values.  Values are retrieved
    when their corresponding predicate matches a given input.  Predicates can
    also be labeled to facilitate removal from the mapping.
    """

    def __init__(self, name):
        # `name` is used only to make error messages more helpful.
        self._name = name
        self._values = {}
        self._labeled_predicates = {}

    def add(self, predicate, value, label=None):
        """
        Maps ``predicate`` to ``value``, optionally under the unique
        ``label``.  Raises ``ValueError`` for duplicate predicates or labels.
        """
        if predicate in self._values:
            raise ValueError(
                "Matcher {} already exists in {}".format(
                    repr(predicate),
                    self._name,
                )
            )
        if label is not None:
            if label in self._labeled_predicates:
                raise ValueError(
                    "Matcher {} with label '{}' already exists in {}".format(
                        repr(predicate),
                        label,
                        self._name,
                    ),
                )
            self._labeled_predicates[label] = predicate
        self._values[predicate] = value

    def find(self, type_str):
        """
        Returns the value whose predicate matches ``type_str``.  Raises
        ``NoEntriesFound`` when nothing matches and ``MultipleEntriesFound``
        when more than one predicate matches.
        """
        results = tuple(
            (predicate, value)
            for predicate, value in self._values.items()
            if predicate(type_str)
        )
        if len(results) == 0:
            raise NoEntriesFound(
                "No matching entries for '{}' in {}".format(
                    type_str,
                    self._name,
                )
            )
        predicates, values = tuple(zip(*results))
        if len(results) > 1:
            predicate_reprs = ", ".join(map(repr, predicates))
            raise MultipleEntriesFound(
                f"Multiple matching entries for '{type_str}' in {self._name}: "
                f"{predicate_reprs}. This occurs when two registrations match the "
                "same type string. You may need to delete one of the "
                "registrations or modify its matching behavior to ensure it "
                'doesn\'t collide with other registrations. See the "Registry" '
                "documentation for more information."
            )
        return values[0]

    def remove_by_equality(self, predicate):
        """Removes ``predicate`` (compared by equality) and any label for it."""
        # Delete the predicate mapping to the previously stored value
        try:
            del self._values[predicate]
        except KeyError:
            raise KeyError(
                "Matcher {} not found in {}".format(
                    repr(predicate),
                    self._name,
                )
            )
        # Delete any label which refers to this predicate
        try:
            label = self._label_for_predicate(predicate)
        except ValueError:
            pass
        else:
            del self._labeled_predicates[label]

    def _label_for_predicate(self, predicate):
        # Both keys and values in `_labeled_predicates` are unique since the
        # `add` method enforces this
        for key, value in self._labeled_predicates.items():
            if value is predicate:
                return key
        raise ValueError(
            "Matcher {} not referred to by any label in {}".format(
                repr(predicate),
                self._name,
            )
        )

    def remove_by_label(self, label):
        """Removes the predicate registered under ``label`` and its value."""
        try:
            predicate = self._labeled_predicates[label]
        except KeyError:
            raise KeyError("Label '{}' not found in {}".format(label, self._name))
        del self._labeled_predicates[label]
        del self._values[predicate]

    def remove(self, predicate_or_label):
        """Dispatches to removal by predicate (callable) or by label (str)."""
        if callable(predicate_or_label):
            self.remove_by_equality(predicate_or_label)
        elif isinstance(predicate_or_label, str):
            self.remove_by_label(predicate_or_label)
        else:
            raise TypeError(
                "Key to be removed must be callable or string: got {}".format(
                    type(predicate_or_label),
                )
            )

    def copy(self):
        # Shallow-copy the internal dicts; predicates and values are shared
        # between the original and the copy.
        cpy = type(self)(self._name)
        cpy._values = copy.copy(self._values)
        cpy._labeled_predicates = copy.copy(self._labeled_predicates)
        return cpy
class Predicate:
    """
    Represents a predicate function to be used for type matching in
    ``ABIRegistry``.

    Subclasses declare their comparison-relevant state via ``__slots__``;
    equality and hashing are derived from those slot values.
    """

    __slots__ = tuple()

    def __call__(self, *args, **kwargs):  # pragma: no cover
        raise NotImplementedError("Must implement `__call__`")

    def __str__(self):  # pragma: no cover
        raise NotImplementedError("Must implement `__str__`")

    def __repr__(self):
        return "<{} {}>".format(type(self).__name__, self)

    def __iter__(self):
        # Yield slot values in declaration order; used by __hash__/__eq__.
        for attr in self.__slots__:
            yield getattr(self, attr)

    def __hash__(self):
        return hash(tuple(self))

    def __eq__(self, other):
        return type(self) is type(other) and tuple(self) == tuple(other)
class Equals(Predicate):
    """
    A predicate that matches any input equal to `value`.
    """

    __slots__ = ("value",)

    def __init__(self, value):
        self.value = value

    def __call__(self, other):
        return self.value == other

    def __str__(self):
        return "(== {})".format(repr(self.value))
class BaseEquals(Predicate):
    """
    A predicate that matches a basic type string with a base component equal to
    `value` and no array component.  If `with_sub` is `True`, the type string
    must have a sub component to match.  If `with_sub` is `False`, the type
    string must *not* have a sub component to match.  If `with_sub` is None,
    the type string's sub component is ignored.
    """

    __slots__ = ("base", "with_sub")

    def __init__(self, base, *, with_sub=None):
        self.base = base
        self.with_sub = with_sub

    def __call__(self, type_str):
        # Unparsable strings simply do not match.
        try:
            abi_type = grammar.parse(type_str)
        except exceptions.ParseError:
            return False
        if isinstance(abi_type, grammar.BasicType):
            if abi_type.arrlist is not None:
                return False
            if self.with_sub is not None:
                if self.with_sub and abi_type.sub is None:
                    return False
                if not self.with_sub and abi_type.sub is not None:
                    return False
            return abi_type.base == self.base
        # We'd reach this point if `type_str` did not contain a basic type
        # e.g. if it contained a tuple type
        return False

    def __str__(self):
        return "(base == {}{})".format(
            repr(self.base),
            ""
            if self.with_sub is None
            else (" and sub is not None" if self.with_sub else " and sub is None"),
        )
def has_arrlist(type_str):
    """
    A predicate that matches a type string with an array dimension list.
    Unparsable strings do not match.
    """
    try:
        return grammar.parse(type_str).arrlist is not None
    except exceptions.ParseError:
        return False
def is_base_tuple(type_str):
    """
    A predicate that matches a tuple type with no array dimension list.
    Unparsable strings do not match.
    """
    try:
        parsed = grammar.parse(type_str)
    except exceptions.ParseError:
        return False
    return parsed.arrlist is None and isinstance(parsed, grammar.TupleType)
def _clear_encoder_cache(old_method):
@functools.wraps(old_method)
def new_method(self, *args, **kwargs):
self.get_encoder.cache_clear()
return old_method(self, *args, **kwargs)
return new_method
def _clear_decoder_cache(old_method):
@functools.wraps(old_method)
def new_method(self, *args, **kwargs):
self.get_decoder.cache_clear()
return old_method(self, *args, **kwargs)
return new_method
class BaseRegistry:
    """Shared registration/lookup helpers for predicate-based registries."""

    @staticmethod
    def _register(mapping, lookup, value, label=None):
        # String lookups become equality predicates and are automatically
        # labeled with the string itself.
        if callable(lookup):
            mapping.add(lookup, value, label)
            return
        if isinstance(lookup, str):
            mapping.add(Equals(lookup), value, lookup)
            return
        raise TypeError(
            "Lookup must be a callable or a value of type `str`: got {}".format(
                repr(lookup),
            )
        )

    @staticmethod
    def _unregister(mapping, lookup_or_label):
        if callable(lookup_or_label):
            mapping.remove_by_equality(lookup_or_label)
            return
        if isinstance(lookup_or_label, str):
            mapping.remove_by_label(lookup_or_label)
            return
        raise TypeError(
            "Lookup/label must be a callable or a value of type `str`: got {}".format(
                repr(lookup_or_label),
            )
        )

    @staticmethod
    def _get_registration(mapping, type_str):
        try:
            value = mapping.find(type_str)
        except ValueError as e:
            # NOTE(review): matching on the message text is fragile; this
            # distinguishes NoEntriesFound ("No matching ...") from other
            # ValueErrors without importing the exception here.
            if "No matching" in e.args[0]:
                # If no matches found, attempt to parse in case lack of matches
                # was due to unparsability
                grammar.parse(type_str)
            raise
        return value
class ABIRegistry(Copyable, BaseRegistry):
    """
    Holds encoder and decoder registrations keyed by type-string predicates.
    Lookups are memoized per type string (``get_encoder`` / ``get_decoder``);
    every (un)registration clears the relevant cache via the decorators below.
    """
    def __init__(self):
        self._encoders = PredicateMapping("encoder registry")
        self._decoders = PredicateMapping("decoder registry")
    def _get_registration(self, mapping, type_str):
        # Registered classes (BaseCoder subclasses) are instantiated for the
        # concrete type string; plain callables are returned as-is.
        coder = super()._get_registration(mapping, type_str)
        if isinstance(coder, type) and issubclass(coder, BaseCoder):
            return coder.from_type_str(type_str, self)
        return coder
    @_clear_encoder_cache
    def register_encoder(
        self, lookup: Lookup, encoder: Encoder, label: str = None
    ) -> None:
        """
        Registers the given ``encoder`` under the given ``lookup``. A unique
        string label may be optionally provided that can be used to refer to
        the registration by name. For more information about arguments, refer
        to :any:`register`.
        """
        self._register(self._encoders, lookup, encoder, label=label)
    @_clear_encoder_cache
    def unregister_encoder(self, lookup_or_label: Lookup) -> None:
        """
        Unregisters an encoder in the registry with the given lookup or label.
        If ``lookup_or_label`` is a string, the encoder with the label
        ``lookup_or_label`` will be unregistered. If it is a function, the
        encoder with the lookup function ``lookup_or_label`` will be
        unregistered.
        """
        self._unregister(self._encoders, lookup_or_label)
    @_clear_decoder_cache
    def register_decoder(
        self, lookup: Lookup, decoder: Decoder, label: str = None
    ) -> None:
        """
        Registers the given ``decoder`` under the given ``lookup``. A unique
        string label may be optionally provided that can be used to refer to
        the registration by name. For more information about arguments, refer
        to :any:`register`.
        """
        self._register(self._decoders, lookup, decoder, label=label)
    @_clear_decoder_cache
    def unregister_decoder(self, lookup_or_label: Lookup) -> None:
        """
        Unregisters a decoder in the registry with the given lookup or label.
        If ``lookup_or_label`` is a string, the decoder with the label
        ``lookup_or_label`` will be unregistered. If it is a function, the
        decoder with the lookup function ``lookup_or_label`` will be
        unregistered.
        """
        self._unregister(self._decoders, lookup_or_label)
    def register(
        self, lookup: Lookup, encoder: Encoder, decoder: Decoder, label: str = None
    ) -> None:
        """
        Registers the given ``encoder`` and ``decoder`` under the given
        ``lookup``. A unique string label may be optionally provided that can
        be used to refer to the registration by name.
        :param lookup: A type string or type string matcher function
            (predicate). When the registry is queried with a type string
            ``query`` to determine which encoder or decoder to use, ``query``
            will be checked against every registration in the registry. If a
            registration was created with a type string for ``lookup``, it will
            be considered a match if ``lookup == query``. If a registration
            was created with a matcher function for ``lookup``, it will be
            considered a match if ``lookup(query) is True``. If more than one
            registration is found to be a match, then an exception is raised.
        :param encoder: An encoder callable or class to use if ``lookup``
            matches a query. If ``encoder`` is a callable, it must accept a
            python value and return a ``bytes`` value. If ``encoder`` is a
            class, it must be a valid subclass of :any:`encoding.BaseEncoder`
            and must also implement the :any:`from_type_str` method on
            :any:`base.BaseCoder`.
        :param decoder: A decoder callable or class to use if ``lookup``
            matches a query. If ``decoder`` is a callable, it must accept a
            stream-like object of bytes and return a python value. If
            ``decoder`` is a class, it must be a valid subclass of
            :any:`decoding.BaseDecoder` and must also implement the
            :any:`from_type_str` method on :any:`base.BaseCoder`.
        :param label: An optional label that can be used to refer to this
            registration by name. This label can be used to unregister an
            entry in the registry via the :any:`unregister` method and its
            variants.
        """
        self.register_encoder(lookup, encoder, label=label)
        self.register_decoder(lookup, decoder, label=label)
    def unregister(self, label: str) -> None:
        """
        Unregisters the entries in the encoder and decoder registries which
        have the label ``label``.
        """
        self.unregister_encoder(label)
        self.unregister_decoder(label)
    @functools.lru_cache(maxsize=None)
    def get_encoder(self, type_str):
        # Memoized; the cache is cleared by @_clear_encoder_cache above.
        return self._get_registration(self._encoders, type_str)
    def has_encoder(self, type_str: abi.TypeStr) -> bool:
        """
        Returns ``True`` if an encoder is found for the given type string
        ``type_str``. Otherwise, returns ``False``. Raises
        :class:`~eth_abi.exceptions.MultipleEntriesFound` if multiple encoders
        are found.
        """
        try:
            self.get_encoder(type_str)
        except (ABITypeError, NoEntriesFound):
            return False
        else:
            return True
    @functools.lru_cache(maxsize=None)
    def get_decoder(self, type_str):
        # Memoized; the cache is cleared by @_clear_decoder_cache above.
        return self._get_registration(self._decoders, type_str)
    def copy(self):
        """
        Copies a registry such that new registrations can be made or existing
        registrations can be unregistered without affecting any instance from
        which a copy was obtained. This is useful if an existing registry
        fulfills most of a user's needs but requires one or two modifications.
        In that case, a copy of that registry can be obtained and the necessary
        changes made without affecting the original registry.
        """
        cpy = type(self)()
        cpy._encoders = copy.copy(self._encoders)
        cpy._decoders = copy.copy(self._decoders)
        return cpy
# Default registry mapping each canonical ABI type family to its standard
# encoder/decoder pair. String lookups match exactly; BaseEquals predicates
# match by base type (optionally constrained on the presence of a sub-type).
registry = ABIRegistry()
registry.register(
    BaseEquals("uint"),
    encoding.UnsignedIntegerEncoder,
    decoding.UnsignedIntegerDecoder,
    label="uint",
)
registry.register(
    BaseEquals("int"),
    encoding.SignedIntegerEncoder,
    decoding.SignedIntegerDecoder,
    label="int",
)
registry.register(
    BaseEquals("address"),
    encoding.AddressEncoder,
    decoding.AddressDecoder,
    label="address",
)
registry.register(
    BaseEquals("bool"),
    encoding.BooleanEncoder,
    decoding.BooleanDecoder,
    label="bool",
)
registry.register(
    BaseEquals("ufixed"),
    encoding.UnsignedFixedEncoder,
    decoding.UnsignedFixedDecoder,
    label="ufixed",
)
registry.register(
    BaseEquals("fixed"),
    encoding.SignedFixedEncoder,
    decoding.SignedFixedDecoder,
    label="fixed",
)
# `bytes<M>` (fixed-size, has sub) vs plain `bytes` (dynamic, no sub) use
# different coders, hence the with_sub constraint.
registry.register(
    BaseEquals("bytes", with_sub=True),
    encoding.BytesEncoder,
    decoding.BytesDecoder,
    label="bytes<M>",
)
registry.register(
    BaseEquals("bytes", with_sub=False),
    encoding.ByteStringEncoder,
    decoding.ByteStringDecoder,
    label="bytes",
)
registry.register(
    BaseEquals("function"),
    encoding.BytesEncoder,
    decoding.BytesDecoder,
    label="function",
)
registry.register(
    BaseEquals("string"),
    encoding.TextStringEncoder,
    decoding.StringDecoder,
    label="string",
)
# Array and tuple types are matched structurally via predicates.
registry.register(
    has_arrlist,
    encoding.BaseArrayEncoder,
    decoding.BaseArrayDecoder,
    label="has_arrlist",
)
registry.register(
    is_base_tuple,
    encoding.TupleEncoder,
    decoding.TupleDecoder,
    label="is_base_tuple",
)
# Registry for the non-standard "packed" encoding. Only encoders are
# registered: packed output is not self-describing, so it cannot be decoded.
registry_packed = ABIRegistry()
registry_packed.register_encoder(
    BaseEquals("uint"),
    encoding.PackedUnsignedIntegerEncoder,
    label="uint",
)
registry_packed.register_encoder(
    BaseEquals("int"),
    encoding.PackedSignedIntegerEncoder,
    label="int",
)
registry_packed.register_encoder(
    BaseEquals("address"),
    encoding.PackedAddressEncoder,
    label="address",
)
registry_packed.register_encoder(
    BaseEquals("bool"),
    encoding.PackedBooleanEncoder,
    label="bool",
)
registry_packed.register_encoder(
    BaseEquals("ufixed"),
    encoding.PackedUnsignedFixedEncoder,
    label="ufixed",
)
registry_packed.register_encoder(
    BaseEquals("fixed"),
    encoding.PackedSignedFixedEncoder,
    label="fixed",
)
registry_packed.register_encoder(
    BaseEquals("bytes", with_sub=True),
    encoding.PackedBytesEncoder,
    label="bytes<M>",
)
registry_packed.register_encoder(
    BaseEquals("bytes", with_sub=False),
    encoding.PackedByteStringEncoder,
    label="bytes",
)
registry_packed.register_encoder(
    BaseEquals("function"),
    encoding.PackedBytesEncoder,
    label="function",
)
registry_packed.register_encoder(
    BaseEquals("string"),
    encoding.PackedTextStringEncoder,
    label="string",
)
registry_packed.register_encoder(
    has_arrlist,
    encoding.PackedArrayEncoder,
    label="has_arrlist",
)
registry_packed.register_encoder(
    is_base_tuple,
    encoding.TupleEncoder,
    label="is_base_tuple",
)

View File

@@ -0,0 +1,3 @@
from ._strategies import ( # noqa: F401
get_abi_strategy,
)

View File

@@ -0,0 +1,230 @@
from typing import (
Callable,
Union,
)
from ...typing.abi import (
TypeStr,
)
from ..utils import (
to_checksum_address,
)
from hypothesis import (
strategies as st,
)
from ..grammar import (
ABIType,
normalize,
parse,
)
from ..registry import (
BaseEquals,
BaseRegistry,
Lookup,
PredicateMapping,
has_arrlist,
is_base_tuple,
)
from ..utils.numeric import (
scale_places,
)
# A factory receives the parsed ABI type plus the registry (so it can resolve
# nested component types) and returns a hypothesis strategy.
StrategyFactory = Callable[[ABIType, "StrategyRegistry"], st.SearchStrategy]
# A registration is either a ready-made strategy or a factory that builds one.
StrategyRegistration = Union[st.SearchStrategy, StrategyFactory]
class StrategyRegistry(BaseRegistry):
    """Maps ABI type-string predicates to hypothesis strategies/factories."""
    def __init__(self):
        self._strategies = PredicateMapping("strategy registry")
    def register_strategy(
        self, lookup: Lookup, registration: StrategyRegistration, label: str = None
    ) -> None:
        """Register a strategy (or factory) under a type string or predicate."""
        self._register(self._strategies, lookup, registration, label=label)
    def unregister_strategy(self, lookup_or_label: Lookup) -> None:
        """Remove a registration by its lookup predicate or its string label."""
        self._unregister(self._strategies, lookup_or_label)
    def get_strategy(self, type_str: TypeStr) -> st.SearchStrategy:
        """
        Returns a hypothesis strategy for the given ABI type.
        :param type_str: The canonical string representation of the ABI type
            for which a hypothesis strategy should be returned.
        :returns: A hypothesis strategy for generating Python values that are
            encodable as values of the given ABI type.
        """
        registration = self._get_registration(self._strategies, type_str)
        if isinstance(registration, st.SearchStrategy):
            # If a hypothesis strategy was registered, just return it
            return registration
        else:
            # Otherwise, assume the factory is a callable. Call it with the abi
            # type to get an appropriate hypothesis strategy.
            normalized_type_str = normalize(type_str)
            abi_type = parse(normalized_type_str)
            strategy = registration(abi_type, self)
            return strategy
def get_uint_strategy(
    abi_type: ABIType, registry: StrategyRegistry
) -> st.SearchStrategy:
    """Integers in the valid range of a `uint<N>` type (N = abi_type.sub)."""
    num_bits = abi_type.sub
    upper_bound = 2**num_bits - 1
    return st.integers(min_value=0, max_value=upper_bound)
def get_int_strategy(
    abi_type: ABIType, registry: StrategyRegistry
) -> st.SearchStrategy:
    """Integers in the valid two's-complement range of an `int<N>` type."""
    num_bits = abi_type.sub
    magnitude = 2 ** (num_bits - 1)
    return st.integers(min_value=-magnitude, max_value=magnitude - 1)
# 20 random bytes rendered through the checksum-address helper.
address_strategy = st.binary(min_size=20, max_size=20).map(to_checksum_address)
bool_strategy = st.booleans()
def get_ufixed_strategy(
    abi_type: ABIType, registry: StrategyRegistry
) -> st.SearchStrategy:
    """Decimals for `ufixed<M>x<N>`: integral draw scaled down by N places."""
    bits, places = abi_type.sub
    integral = st.decimals(min_value=0, max_value=2**bits - 1, places=0)
    return integral.map(scale_places(places))
def get_fixed_strategy(
    abi_type: ABIType, registry: StrategyRegistry
) -> st.SearchStrategy:
    """Decimals for `fixed<M>x<N>`: signed integral draw scaled by N places."""
    bits, places = abi_type.sub
    magnitude = 2 ** (bits - 1)
    integral = st.decimals(min_value=-magnitude, max_value=magnitude - 1, places=0)
    return integral.map(scale_places(places))
def get_bytes_strategy(
    abi_type: ABIType, registry: StrategyRegistry
) -> st.SearchStrategy:
    """Binary values of exactly `sub` bytes (for fixed-size `bytes<M>`)."""
    size = abi_type.sub
    return st.binary(min_size=size, max_size=size)
# Dynamic `bytes`: capped at 4096 bytes to keep generated examples small.
bytes_strategy = st.binary(min_size=0, max_size=4096)
string_strategy = st.text()
def get_array_strategy(
    abi_type: ABIType, registry: StrategyRegistry
) -> st.SearchStrategy:
    """Lists built from the item type's strategy; sized by the last dimension."""
    item_strategy = registry.get_strategy(abi_type.item_type.to_type_str())
    final_dim = abi_type.arrlist[-1]
    if final_dim:
        # Static array: exactly `final_dim[0]` items.
        dim_size = final_dim[0]
        return st.lists(item_strategy, min_size=dim_size, max_size=dim_size)
    # Dynamic array: no length restriction.
    return st.lists(item_strategy)
def get_tuple_strategy(
    abi_type: ABIType, registry: StrategyRegistry
) -> st.SearchStrategy:
    """Tuples built from each component type's registered strategy, in order."""
    return st.tuples(
        *(
            registry.get_strategy(component.to_type_str())
            for component in abi_type.components
        )
    )
# Default strategy registry mirroring the coder registry's type families.
strategy_registry = StrategyRegistry()
strategy_registry.register_strategy(
    BaseEquals("uint"),
    get_uint_strategy,
    label="uint",
)
strategy_registry.register_strategy(
    BaseEquals("int"),
    get_int_strategy,
    label="int",
)
strategy_registry.register_strategy(
    BaseEquals("address", with_sub=False),
    address_strategy,
    label="address",
)
strategy_registry.register_strategy(
    BaseEquals("bool", with_sub=False),
    bool_strategy,
    label="bool",
)
strategy_registry.register_strategy(
    BaseEquals("ufixed"),
    get_ufixed_strategy,
    label="ufixed",
)
strategy_registry.register_strategy(
    BaseEquals("fixed"),
    get_fixed_strategy,
    label="fixed",
)
strategy_registry.register_strategy(
    BaseEquals("bytes", with_sub=True),
    get_bytes_strategy,
    label="bytes<M>",
)
strategy_registry.register_strategy(
    BaseEquals("bytes", with_sub=False),
    bytes_strategy,
    label="bytes",
)
strategy_registry.register_strategy(
    BaseEquals("function", with_sub=False),
    get_bytes_strategy,
    label="function",
)
strategy_registry.register_strategy(
    BaseEquals("string", with_sub=False),
    string_strategy,
    label="string",
)
strategy_registry.register_strategy(
    has_arrlist,
    get_array_strategy,
    label="has_arrlist",
)
strategy_registry.register_strategy(
    is_base_tuple,
    get_tuple_strategy,
    label="is_base_tuple",
)
# Public entry point: build a strategy for any canonical ABI type string.
get_abi_strategy = strategy_registry.get_strategy

View File

@@ -0,0 +1,83 @@
import decimal
from typing import (
Callable,
Tuple,
)
# 999 significant digits — comfortably more than any ABI fixed-point type needs.
ABI_DECIMAL_PREC = 999
abi_decimal_context = decimal.Context(prec=ABI_DECIMAL_PREC)
ZERO = decimal.Decimal(0)
TEN = decimal.Decimal(10)
def ceil32(x: int) -> int:
    """Round ``x`` up to the nearest multiple of 32."""
    remainder = x % 32
    return x + 32 - remainder if remainder else x
def compute_unsigned_integer_bounds(num_bits: int) -> Tuple[int, int]:
    """Return the inclusive ``(lower, upper)`` bounds of a ``uint<num_bits>``."""
    upper = (1 << num_bits) - 1
    return 0, upper
def compute_signed_integer_bounds(num_bits: int) -> Tuple[int, int]:
    """Return the inclusive ``(lower, upper)`` bounds of an ``int<num_bits>``."""
    magnitude = 1 << (num_bits - 1)
    return -magnitude, magnitude - 1
def compute_unsigned_fixed_bounds(
    num_bits: int,
    frac_places: int,
) -> Tuple[decimal.Decimal, decimal.Decimal]:
    """Inclusive decimal bounds of a ``ufixed<num_bits>x<frac_places>``."""
    _, int_upper = compute_unsigned_integer_bounds(num_bits)
    # Scale inside the high-precision context so no digits are lost.
    with decimal.localcontext(abi_decimal_context):
        upper = decimal.Decimal(int_upper) * TEN**-frac_places
    return ZERO, upper
def compute_signed_fixed_bounds(
    num_bits: int,
    frac_places: int,
) -> Tuple[decimal.Decimal, decimal.Decimal]:
    """Inclusive decimal bounds of a ``fixed<num_bits>x<frac_places>``."""
    int_lower, int_upper = compute_signed_integer_bounds(num_bits)
    # Scale inside the high-precision context so no digits are lost.
    with decimal.localcontext(abi_decimal_context):
        scale = TEN**-frac_places
        return decimal.Decimal(int_lower) * scale, decimal.Decimal(int_upper) * scale
def scale_places(places: int) -> Callable[[decimal.Decimal], decimal.Decimal]:
    """
    Returns a function that shifts the decimal point of decimal values to the
    right by ``places`` places.

    :param places: number of decimal places to shift by; positive values move
        the point left (divide), negative values move it right (multiply).
    :raises ValueError: if ``places`` is not an ``int``.
    """
    if not isinstance(places, int):
        raise ValueError(
            f"Argument `places` must be int. Got value {places} "
            f"of type {type(places)}.",
        )
    # Compute the factor once, under the high-precision ABI context.
    with decimal.localcontext(abi_decimal_context):
        scaling_factor = TEN**-places
    def f(x: decimal.Decimal) -> decimal.Decimal:
        # Multiply under the same context so results keep full precision.
        with decimal.localcontext(abi_decimal_context):
            return x * scaling_factor
    # Name the closure after its scale for readable debugging/reprs.
    places_repr = f"Eneg{places}" if places > 0 else f"Epos{-places}"
    func_name = f"scale_by_{places_repr}"
    f.__name__ = func_name
    f.__qualname__ = func_name
    return f

View File

@@ -0,0 +1,27 @@
from ...utils.toolz import (
curry,
)
@curry
def zpad(value: bytes, length: int) -> bytes:
    # Left-pad with zero bytes to `length`; curried so `length` can be preset.
    return value.rjust(length, b"\x00")
zpad32 = zpad(length=32)
@curry
def zpad_right(value: bytes, length: int) -> bytes:
    # Right-pad with zero bytes to `length`.
    return value.ljust(length, b"\x00")
zpad32_right = zpad_right(length=32)
@curry
def fpad(value: bytes, length: int) -> bytes:
    # Left-pad with 0xff bytes — presumably for two's-complement sign
    # extension of negative values; confirm at call sites.
    return value.rjust(length, b"\xff")
fpad32 = fpad(length=32)

View File

@@ -0,0 +1,19 @@
from typing import (
Any,
)
def abbr(value: Any, limit: int = 79) -> str:
    """
    Converts a value into its string representation and abbreviates that
    representation based on the given length `limit` if necessary.
    """
    rep = repr(value)
    if len(rep) <= limit:
        return rep
    if limit < 3:
        # Need room for at least the "..." suffix.
        raise ValueError("Abbreviation limit may not be less than 3")
    return rep[: limit - 3] + "..."

View File

@@ -0,0 +1,3 @@
from .messages import *
__all__ = ["messages"]

View File

@@ -0,0 +1,4 @@
from .encoding_and_hashing import (
hash_domain,
hash_eip712_message,
)

View File

@@ -0,0 +1,239 @@
from typing import (
Any,
Dict,
List,
Tuple,
Union,
)
from ...abi import (
encode,
)
from ....keccak import (
SHA3 as keccak
)
from ...utils import (
to_bytes,
to_int,
)
from .helpers import (
EIP712_SOLIDITY_TYPES,
is_0x_prefixed_hexstr,
is_array_type,
parse_core_array_type,
parse_parent_array_type,
)
def get_primary_type(types: Dict[str, List[Dict[str, str]]]) -> str:
    """
    Return the one custom type that no other custom type references.

    :raises ValueError: if zero or several such root types exist.
    """
    referenced = set()
    for type_name, fields in types.items():
        for field in fields:
            core = parse_core_array_type(field["type"])
            if core != type_name and core in types:
                referenced.add(core)
    candidates = [name for name in types if name not in referenced]
    if len(candidates) != 1:
        raise ValueError("Unable to determine primary type")
    return candidates[0]
def encode_field(
    types: Dict[str, List[Dict[str, str]]],
    name: str,
    type_: str,
    value: Any,
) -> Tuple[str, Union[int, bytes]]:
    """
    Reduce one EIP-712 field to an ``(abi_type, encodable_value)`` pair.

    Custom (struct) types, arrays, dynamic ``bytes`` and ``string`` collapse
    to a ``bytes32`` keccak hash; atomic types pass through with light
    coercion (hex strings / text -> bytes or int).

    :raises ValueError: if ``value`` is ``None`` for a non-custom,
        non-dynamic type, or an array type receives a non-list value.
    """
    if type_ in types.keys():
        # type is a custom type
        if value is None:
            return ("bytes32", b"\x00" * 32)
        else:
            return ("bytes32", keccak(encode_data(type_, types, value)))
    elif type_ in ["string", "bytes"] and value is None:
        return ("bytes32", b"")
    # None is allowed only for custom and dynamic types
    elif value is None:
        raise ValueError(f"Missing value for field `{name}` of type `{type_}`")
    elif is_array_type(type_):
        # handle array type with non-array value
        if not isinstance(value, list):
            raise ValueError(
                f"Invalid value for field `{name}` of type `{type_}`: "
                f"expected array, got `{value}` of type `{type(value)}`"
            )
        parsed_type = parse_parent_array_type(type_)
        type_value_pairs = [
            encode_field(types, name, parsed_type, item) for item in value
        ]
        if not type_value_pairs:
            # the keccak hash of `encode((), ())`
            return (
                "bytes32",
                b"\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p",  # noqa: E501
            )
        data_types, data_hashes = zip(*type_value_pairs)
        return ("bytes32", keccak(encode(data_types, data_hashes)))
    elif type_ == "bool":
        return (type_, bool(value))
    # all bytes types allow hexstr and str values
    elif type_.startswith("bytes"):
        if not isinstance(value, bytes):
            if is_0x_prefixed_hexstr(value):
                value = to_bytes(hexstr=value)
            elif isinstance(value, str):
                value = to_bytes(text=value)
            else:
                # negative ints are clamped to zero before conversion
                if isinstance(value, int) and value < 0:
                    value = 0
                value = to_bytes(value)
        return (
            # keccak hash if dynamic `bytes` type
            ("bytes32", keccak(value))
            if type_ == "bytes"
            # if fixed bytesXX type, do not hash
            else (type_, value)
        )
    elif type_ == "string":
        if isinstance(value, int):
            value = to_bytes(value)
        else:
            value = to_bytes(text=value)
        return ("bytes32", keccak(value))
    # allow string values for int and uint types; `isinstance` (rather than an
    # exact `type(...) == str` check) also covers str subclasses, which would
    # otherwise fall through and be passed to the encoder un-coerced
    elif isinstance(value, str) and type_.startswith(("int", "uint")):
        if is_0x_prefixed_hexstr(value):
            return (type_, to_int(hexstr=value))
        else:
            return (type_, to_int(text=value))
    return (type_, value)
def find_type_dependencies(type_, types, results=None):
    """
    Recursively collect every custom type reachable from ``type_``.

    Solidity primitive types are skipped; unknown custom types raise.
    """
    if results is None:
        results = set()
    # a type must be a string
    if not isinstance(type_, str):
        raise ValueError(
            "Invalid find_type_dependencies input: expected string, got "
            f"`{type_}` of type `{type(type_)}`"
        )
    # get core type if it's an array type
    core_type = parse_core_array_type(type_)
    if core_type in EIP712_SOLIDITY_TYPES or core_type in results:
        # primitive types have no dependencies; already-seen types are done
        return results
    if core_type not in types:
        raise ValueError(f"No definition of type `{core_type}`")
    results.add(core_type)
    for field in types[core_type]:
        find_type_dependencies(field["type"], types, results)
    return results
def encode_type(type_: str, types: Dict[str, List[Dict[str, str]]]) -> str:
    """
    Build the EIP-712 encoded type string: the primary type first, then all
    of its dependencies sorted alphabetically, each as `Name(type name,...)`.
    """
    unsorted_deps = find_type_dependencies(type_, types)
    unsorted_deps.discard(type_)
    ordered = [type_] + sorted(unsorted_deps)
    pieces = []
    for dep in ordered:
        fields = ",".join(
            f"{child['type']} {child['name']}" for child in types[dep]
        )
        pieces.append(f"{dep}({fields})")
    return "".join(pieces)
def hash_type(type_: str, types: Dict[str, List[Dict[str, str]]]) -> bytes:
    # keccak256 of the UTF-8 encoded type string (EIP-712 `typeHash`).
    return keccak(to_bytes(text=encode_type(type_, types)))
def encode_data(
    type_: str,
    types: Dict[str, List[Dict[str, str]]],
    data: Dict[str, Any],
) -> bytes:
    """
    ABI-encode a struct instance: its type hash followed by each field
    reduced through :func:`encode_field`.

    Missing fields are passed to ``encode_field`` as ``None`` (allowed only
    for custom and dynamic types).
    """
    encoded_types: List[str] = ["bytes32"]
    encoded_values: List[Union[bytes, int]] = [hash_type(type_, types)]
    for field in types[type_]:
        # `field_type` rather than `type` — avoid shadowing the builtin
        field_type, field_value = encode_field(
            types, field["name"], field["type"], data.get(field["name"])
        )
        encoded_types.append(field_type)
        encoded_values.append(field_value)
    return encode(encoded_types, encoded_values)
def hash_struct(
    type_: str,
    types: Dict[str, List[Dict[str, str]]],
    data: Dict[str, Any],
) -> bytes:
    """keccak256 of the fully encoded struct (EIP-712 `hashStruct`)."""
    return keccak(encode_data(type_, types, data))
def hash_eip712_message(
    message_types: Dict[str, List[Dict[str, str]]],
    message_data: Dict[str, Any],
) -> bytes:
    """
    Hash a full EIP-712 message, deriving the primary type automatically.

    Returns the same hash as `hash_struct` called with the derived type.
    """
    primary_type = get_primary_type(message_types)
    return keccak(encode_data(primary_type, message_types, message_data))
def hash_domain(domain_data: Dict[str, Any]) -> bytes:
    """
    Hash the EIP712Domain struct, building its type definition from exactly
    the keys present in ``domain_data`` (in the spec's canonical order).

    :raises ValueError: if ``domain_data`` contains a non-standard key.
    """
    eip712_domain_map = {
        "name": {"name": "name", "type": "string"},
        "version": {"name": "version", "type": "string"},
        "chainId": {"name": "chainId", "type": "uint256"},
        "verifyingContract": {"name": "verifyingContract", "type": "address"},
        "salt": {"name": "salt", "type": "bytes32"},
    }
    for k in domain_data.keys():
        if k not in eip712_domain_map:
            raise ValueError(f"Invalid domain key: `{k}`")
    domain_types = {
        "EIP712Domain": [
            field
            for key, field in eip712_domain_map.items()
            if key in domain_data
        ]
    }
    return hash_struct("EIP712Domain", domain_types, domain_data)

View File

@@ -0,0 +1,40 @@
from typing import (
Any,
)
from ...utils import (
is_hexstr,
)
def _get_eip712_solidity_types():
types = ["bool", "address", "string", "bytes", "uint", "int"]
ints = [f"int{(x + 1) * 8}" for x in range(32)]
uints = [f"uint{(x + 1) * 8}" for x in range(32)]
bytes_ = [f"bytes{x + 1}" for x in range(32)]
return types + ints + uints + bytes_
EIP712_SOLIDITY_TYPES = _get_eip712_solidity_types()
def is_array_type(type_: str) -> bool:
    """Return True when the type string has an array suffix, e.g. `uint256[2]`."""
    return type_[-1:] == "]"
def is_0x_prefixed_hexstr(value: Any) -> bool:
    # Presumably `is_hexstr` also accepts unprefixed hex — hence the explicit
    # prefix check on top of it.
    return is_hexstr(value) and value.startswith("0x")
# strip all brackets: Person[][] -> Person
def parse_core_array_type(type_: str) -> str:
    """Return the element type with every array suffix removed."""
    if not is_array_type(type_):
        return type_
    return type_[: type_.index("[")]
# strip only last set of brackets: Person[3][1] -> Person[3]
def parse_parent_array_type(type_: str) -> str:
    """Return the type with only its outermost array dimension removed."""
    if not is_array_type(type_):
        return type_
    return type_[: type_.rindex("[")]

View File

@@ -0,0 +1,263 @@
from collections.abc import (
Mapping,
)
from typing import (
Any,
Dict,
NamedTuple,
)
import warnings
from ..typing import (
Address,
)
from ..utils.curried import (
ValidationError,
)
from ..hexbytes import (
HexBytes,
)
from .encode_typed_data.encoding_and_hashing import (
hash_domain,
hash_eip712_message,
)
# watch for updates to signature format
class SignableMessage(NamedTuple):
    """
    A message compatible with EIP-191_ that is ready to be signed.
    The properties are components of an EIP-191_ signable message. Other message formats
    can be encoded into this format for easy signing. This data structure doesn't need
    to know about the original message format. For example, you can think of
    EIP-712 as compiling down to an EIP-191 message.
    In typical usage, you should never need to create these by hand. Instead, use
    one of the available encode_* methods in this module, like:
    - :meth:`encode_typed_data`
    .. _EIP-191: https://eips.ethereum.org/EIPS/eip-191
    """

    version: bytes  # must be length 1
    header: bytes  # aka "version specific data"
    body: bytes  # aka "data to sign"
def encode_typed_data(
    domain_data: Dict[str, Any] = None,
    message_types: Dict[str, Any] = None,
    message_data: Dict[str, Any] = None,
    full_message: Dict[str, Any] = None,
) -> SignableMessage:
    r"""
    Encode an EIP-712_ message in a manner compatible with other implementations
    in use, such as the Metamask and Ethers ``signTypedData`` functions.
    See the `EIP-712 spec <https://eips.ethereum.org/EIPS/eip-712>`_ for more information.
    You may supply the information to be encoded in one of two ways:
    As exactly three arguments:
    - ``domain_data``, a dict of the EIP-712 domain data
    - ``message_types``, a dict of custom types (do not include a ``EIP712Domain``
      key)
    - ``message_data``, a dict of the data to be signed
    Or as a single argument:
    - ``full_message``, a dict containing the following keys:
        - ``types``, a dict of custom types (may include a ``EIP712Domain`` key)
        - ``primaryType``, (optional) a string of the primary type of the message
        - ``domain``, a dict of the EIP-712 domain data
        - ``message``, a dict of the data to be signed
    .. WARNING:: Note that this code has not gone through an external audit, and
        the test cases are incomplete.
    Type Coercion:
    - For fixed-size bytes types, smaller values will be padded to fit in larger
      types, but values larger than the type will raise ``ValueOutOfBounds``.
      e.g., an 8-byte value will be padded to fit a ``bytes16`` type, but 16-byte
      value provided for a ``bytes8`` type will raise an error.
    - Fixed-size and dynamic ``bytes`` types will accept ``int``s. Any negative
      values will be converted to ``0`` before being converted to ``bytes``
    - ``int`` and ``uint`` types will also accept strings. If prefixed with ``"0x"``
      , the string will be interpreted as hex. Otherwise, it will be interpreted as
      decimal.
    Notable differences from ``signTypedData``:
    - Custom types that are not alphanumeric will encode differently.
    - Custom types that are used but not defined in ``types`` will not encode.
    :param domain_data: EIP712 domain data
    :param message_types: custom types used by the `value` data
    :param message_data: data to be signed
    :param full_message: a dict containing all data and types
    :returns: a ``SignableMessage``, an encoded message ready to be signed
    .. doctest:: python
        >>> # examples of basic usage
        >>> from eth_account import Account
        >>> from .messages import encode_typed_data
        >>> # 3-argument usage
        >>> # all domain properties are optional
        >>> domain_data = {
        ...     "name": "Ether Mail",
        ...     "version": "1",
        ...     "chainId": 1,
        ...     "verifyingContract": "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC",
        ...     "salt": b"decafbeef",
        ... }
        >>> # custom types
        >>> message_types = {
        ...     "Person": [
        ...         {"name": "name", "type": "string"},
        ...         {"name": "wallet", "type": "address"},
        ...     ],
        ...     "Mail": [
        ...         {"name": "from", "type": "Person"},
        ...         {"name": "to", "type": "Person"},
        ...         {"name": "contents", "type": "string"},
        ...     ],
        ... }
        >>> # the data to be signed
        >>> message_data = {
        ...     "from": {
        ...         "name": "Cow",
        ...         "wallet": "0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826",
        ...     },
        ...     "to": {
        ...         "name": "Bob",
        ...         "wallet": "0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB",
        ...     },
        ...     "contents": "Hello, Bob!",
        ... }
        >>> key = "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
        >>> signable_message = encode_typed_data(domain_data, message_types, message_data)
        >>> signed_message = Account.sign_message(signable_message, key)
        >>> signed_message.messageHash
        HexBytes('0xc5bb16ccc59ae9a3ad1cb8343d4e3351f057c994a97656e1aff8c134e56f7530')
        >>> # the message can be signed in one step using Account.sign_typed_data
        >>> signed_typed_data = Account.sign_typed_data(key, domain_data, message_types, message_data)
        >>> signed_typed_data == signed_message
        True
        >>> # 1-argument usage
        >>> # all domain properties are optional
        >>> full_message = {
        ...     "types": {
        ...         "EIP712Domain": [
        ...             {"name": "name", "type": "string"},
        ...             {"name": "version", "type": "string"},
        ...             {"name": "chainId", "type": "uint256"},
        ...             {"name": "verifyingContract", "type": "address"},
        ...             {"name": "salt", "type": "bytes32"},
        ...         ],
        ...         "Person": [
        ...             {"name": "name", "type": "string"},
        ...             {"name": "wallet", "type": "address"},
        ...         ],
        ...         "Mail": [
        ...             {"name": "from", "type": "Person"},
        ...             {"name": "to", "type": "Person"},
        ...             {"name": "contents", "type": "string"},
        ...         ],
        ...     },
        ...     "primaryType": "Mail",
        ...     "domain": {
        ...         "name": "Ether Mail",
        ...         "version": "1",
        ...         "chainId": 1,
        ...         "verifyingContract": "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC",
        ...         "salt": b"decafbeef"
        ...     },
        ...     "message": {
        ...         "from": {
        ...             "name": "Cow",
        ...             "wallet": "0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"
        ...         },
        ...         "to": {
        ...             "name": "Bob",
        ...             "wallet": "0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB"
        ...         },
        ...         "contents": "Hello, Bob!",
        ...     },
        ... }
        >>> signable_message_2 = encode_typed_data(full_message=full_message)
        >>> signed_message_2 = Account.sign_message(signable_message_2, key)
        >>> signed_message_2.messageHash
        HexBytes('0xc5bb16ccc59ae9a3ad1cb8343d4e3351f057c994a97656e1aff8c134e56f7530')
        >>> signed_message_2 == signed_message
        True
        >>> # the full_message can be signed in one step using Account.sign_typed_data
        >>> signed_typed_data_2 = Account.sign_typed_data(key, domain_data, message_types, message_data)
        >>> signed_typed_data_2 == signed_message_2
        True
    .. _EIP-712: https://eips.ethereum.org/EIPS/eip-712
    """  # noqa: E501
    # `get_primary_type` is needed for the `primaryType` cross-check below but
    # is not in this module's top-level imports; importing it here fixes a
    # NameError when `full_message` includes a `primaryType` key.
    from .encode_typed_data.encoding_and_hashing import (
        get_primary_type,
    )
    if full_message is not None:
        if (
            domain_data is not None
            or message_types is not None
            or message_data is not None
        ):
            raise ValueError(
                "You may supply either `full_message` as a single argument or "
                "`domain_data`, `message_types`, and `message_data` as three arguments,"
                " but not both."
            )
        full_message_types = full_message["types"].copy()
        full_message_domain = full_message["domain"].copy()
        # If EIP712Domain types were provided, check that they match the domain data
        if "EIP712Domain" in full_message_types:
            domain_data_keys = list(full_message_domain.keys())
            domain_types_keys = [
                field["name"] for field in full_message_types["EIP712Domain"]
            ]
            if set(domain_data_keys) != (set(domain_types_keys)):
                raise ValidationError(
                    "The fields provided in `domain` do not match the fields provided"
                    " in `types.EIP712Domain`. The fields provided in `domain` were"
                    f" `{domain_data_keys}`, but the fields provided in "
                    f"`types.EIP712Domain` were `{domain_types_keys}`."
                )
        full_message_types.pop("EIP712Domain", None)
        # If primaryType was provided, check that it matches the derived primaryType
        if "primaryType" in full_message:
            derived_primary_type = get_primary_type(full_message_types)
            provided_primary_type = full_message["primaryType"]
            if derived_primary_type != provided_primary_type:
                raise ValidationError(
                    "The provided `primaryType` does not match the derived "
                    "`primaryType`. The provided `primaryType` was "
                    f"`{provided_primary_type}`, but the derived `primaryType` was "
                    f"`{derived_primary_type}`."
                )
        parsed_domain_data = full_message_domain
        parsed_message_types = full_message_types
        parsed_message_data = full_message["message"]
    else:
        parsed_domain_data = domain_data
        parsed_message_types = message_types
        parsed_message_data = message_data
    return SignableMessage(
        HexBytes(b"\x01"),
        hash_domain(parsed_domain_data),
        hash_eip712_message(parsed_message_types, parsed_message_data),
    )

View File

@@ -0,0 +1,5 @@
from .main import (
HexBytes,
)
__all__ = ["HexBytes"]

View File

@@ -0,0 +1,54 @@
import binascii
from typing import (
Union,
)
def to_bytes(val: Union[bool, bytearray, bytes, int, str, memoryview]) -> bytes:
    """
    Equivalent to: `eth_utils.hexstr_if_str(eth_utils.to_bytes, val)` .
    Convert a hex string, integer, or bool, to a bytes representation.
    Alternatively, pass through bytes or bytearray as a bytes value.
    """
    # Plain bytes (including subclasses) pass through untouched.
    if isinstance(val, bytes):
        return val
    # Strings are treated as hex, with or without a "0x" prefix.
    if isinstance(val, str):
        return hexstr_to_bytes(val)
    # Byte-oriented containers are copied into an immutable bytes object.
    if isinstance(val, (bytearray, memoryview)):
        return bytes(val)
    # bool must be tested before int, because isinstance(True, int) is True.
    if isinstance(val, bool):
        return b"\x01" if val else b"\x00"
    if isinstance(val, int):
        if val < 0:
            raise ValueError(f"Cannot convert negative integer {val} to bytes")
        # Route through the hex-string path for a minimal big-endian encoding.
        return to_bytes(hex(val))
    raise TypeError(f"Cannot convert {val!r} of type {type(val)} to bytes")
def hexstr_to_bytes(hexstr: str) -> bytes:
    """
    Decode a hex string, with or without a "0x"/"0X" prefix, to bytes.

    Odd-length input is left-padded with a single "0" nibble.

    :raises ValueError: if the string contains characters outside
        ``[0-9a-fA-F]`` (including any non-ASCII character).
    """
    if hexstr.startswith(("0x", "0X")):
        non_prefixed_hex = hexstr[2:]
    else:
        non_prefixed_hex = hexstr
    # If the hex string is odd-length, then left-pad it to an even length.
    # Checking len(hexstr) is equivalent here: the "0x" prefix is 2 chars,
    # so it never changes the parity of the length.
    if len(hexstr) % 2:
        padded_hex = "0" + non_prefixed_hex
    else:
        padded_hex = non_prefixed_hex
    try:
        ascii_hex = padded_hex.encode("ascii")
    except UnicodeError:
        # BUG FIX: str.encode raises UnicodeEncodeError, which the previous
        # `except UnicodeDecodeError` clause could never catch, so non-ASCII
        # input leaked a UnicodeEncodeError instead of the intended
        # ValueError. UnicodeError is the common base of both.
        raise ValueError(
            f"hex string {padded_hex} may only contain [0-9a-fA-F] characters"
        )
    else:
        # binascii.Error (a ValueError subclass) propagates for ASCII
        # input that still is not valid hex, e.g. "zz".
        return binascii.unhexlify(ascii_hex)

View File

@@ -0,0 +1,65 @@
import sys
from typing import (
TYPE_CHECKING,
Type,
Union,
cast,
overload,
)
from ._utils import (
to_bytes,
)
if TYPE_CHECKING:
from typing import (
SupportsIndex,
)
BytesLike = Union[bool, bytearray, bytes, int, str, memoryview]
class HexBytes(bytes):
    """
    HexBytes is a *very* thin wrapper around the python built-in :class:`bytes` class.
    It has these three changes:
    1. Accepts more initializing values, like hex strings, non-negative integers,
       and booleans
    2. Returns hex with prefix '0x' from :meth:`HexBytes.hex`
    3. The representation at console is in hex
    """
    def __new__(cls: Type[bytes], val: BytesLike) -> "HexBytes":
        # Normalize any supported input kind to raw bytes before construction.
        raw = to_bytes(val)
        return cast(HexBytes, super().__new__(cls, raw))  # type: ignore # https://github.com/python/typeshed/issues/2630 # noqa: E501
    def hex(
        self, sep: Union[str, bytes] = None, bytes_per_sep: "SupportsIndex" = 1
    ) -> str:
        """
        Output hex-encoded bytes, with an "0x" prefix.
        Everything following the "0x" is output exactly like :meth:`bytes.hex`.
        """
        # `sep` and `bytes_per_sep` are accepted for signature compatibility
        # with bytes.hex but are intentionally not forwarded.
        return "0x" + super().hex()
    @overload
    def __getitem__(self, key: "SupportsIndex") -> int:  # noqa: F811
        ...
    @overload  # noqa: F811
    def __getitem__(self, key: slice) -> "HexBytes":  # noqa: F811
        ...
    def __getitem__(  # noqa: F811
        self, key: Union["SupportsIndex", slice]
    ) -> Union[int, bytes, "HexBytes"]:
        item = super().__getitem__(key)
        # Slicing yields plain bytes (which have a `.hex` attribute) — re-wrap
        # those so slices stay HexBytes; integer indexing returns the int as-is.
        return type(self)(item) if hasattr(item, "hex") else item
    def __repr__(self) -> str:
        return f"HexBytes({self.hex()!r})"

View File

@@ -0,0 +1,63 @@
from importlib.metadata import (
version as __version,
)
from .abi import (
Decodable,
TypeStr,
)
from .bls import (
BLSPrivateKey,
BLSPubkey,
BLSSignature,
)
from .discovery import (
NodeID,
)
from .encoding import (
HexStr,
Primitives,
)
from .enums import (
ForkName,
)
from .ethpm import (
URI,
ContractName,
Manifest,
)
from .evm import (
Address,
AnyAddress,
BlockIdentifier,
BlockNumber,
ChecksumAddress,
Hash32,
HexAddress,
)
from .networks import (
ChainId,
)
__all__ = (
"Decodable",
"TypeStr",
"BLSPrivateKey",
"BLSPubkey",
"BLSSignature",
"NodeID",
"HexStr",
"Primitives",
"ForkName",
"ChainId",
"URI",
"ContractName",
"Manifest",
"Address",
"AnyAddress",
"BlockIdentifier",
"BlockNumber",
"ChecksumAddress",
"Hash32",
"HexAddress",
)

View File

@@ -0,0 +1,6 @@
from typing import (
Union,
)
# Human-readable ABI type string, e.g. "uint256" — plain alias, not a NewType.
TypeStr = str
# Byte-oriented input acceptable to ABI decoding helpers.
Decodable = Union[bytes, bytearray]

View File

@@ -0,0 +1,7 @@
from typing import (
NewType,
)
# BLS key/signature distinct types; the byte-length notes below are the
# intended sizes — the NewType wrappers do not enforce them.
BLSPubkey = NewType("BLSPubkey", bytes)  # bytes48
BLSPrivateKey = NewType("BLSPrivateKey", int)
BLSSignature = NewType("BLSSignature", bytes)  # bytes96

View File

@@ -0,0 +1,5 @@
from typing import (
NewType,
)
# Opaque bytes identifier for a node in the peer-discovery protocol.
NodeID = NewType("NodeID", bytes)

View File

@@ -0,0 +1,7 @@
from typing import (
NewType,
Union,
)
# Hex-encoded string; distinct type so type checkers catch plain-str mixups.
HexStr = NewType("HexStr", str)
# Primitive value kinds accepted by the encoding conversion helpers.
Primitives = Union[bytes, int, bool]

View File

@@ -0,0 +1,17 @@
class ForkName:
    # Canonical Ethereum network-upgrade ("fork") names. Each attribute's
    # value is its own name, so these double as string identifiers.
    # Listed in rough chronological order of the upgrades.
    Frontier = "Frontier"
    Homestead = "Homestead"
    EIP150 = "EIP150"
    EIP158 = "EIP158"
    Byzantium = "Byzantium"
    Constantinople = "Constantinople"
    Metropolis = "Metropolis"
    ConstantinopleFix = "ConstantinopleFix"
    Istanbul = "Istanbul"
    Berlin = "Berlin"
    London = "London"
    ArrowGlacier = "ArrowGlacier"
    GrayGlacier = "GrayGlacier"
    Paris = "Paris"
    Shanghai = "Shanghai"
    Cancun = "Cancun"

View File

@@ -0,0 +1,9 @@
from typing import (
Any,
Dict,
NewType,
)
# Types for EthPM package artifacts.
ContractName = NewType("ContractName", str)
# A parsed package manifest: a JSON-compatible mapping. NewType over a
# mutable Dict gives naming only, no immutability or schema validation.
Manifest = NewType("Manifest", Dict[str, Any])
URI = NewType("URI", str)

View File

@@ -0,0 +1,20 @@
from typing import (
Literal,
NewType,
TypeVar,
Union,
)
from .encoding import (
HexStr,
)
# Raw 32-byte hash value — length is by convention, not enforced by the type.
Hash32 = NewType("Hash32", bytes)
BlockNumber = NewType("BlockNumber", int)
# Named block tags accepted by JSON-RPC-style block arguments.
BlockParams = Literal["latest", "earliest", "pending", "safe", "finalized"]
# Anything that can designate a block: a tag, a number, a hash, or hex text.
BlockIdentifier = Union[BlockParams, BlockNumber, Hash32, HexStr, int]
# Account address forms: raw bytes, hex string, and checksummed hex string
# (presumably EIP-55 — the NewType does not validate the checksum).
Address = NewType("Address", bytes)
HexAddress = NewType("HexAddress", HexStr)
ChecksumAddress = NewType("ChecksumAddress", HexAddress)
# Constrained TypeVar: a function is generic over exactly one address form.
AnyAddress = TypeVar("AnyAddress", Address, HexAddress, ChecksumAddress)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,115 @@
from importlib.metadata import (
version as __version,
)
# from .abi import (
# event_abi_to_log_topic,
# event_signature_to_log_topic,
# function_abi_to_4byte_selector,
# function_signature_to_4byte_selector,
# )
from .address import (
is_address,
is_binary_address,
is_canonical_address,
is_checksum_address,
is_checksum_formatted_address,
is_hex_address,
is_normalized_address,
is_same_address,
to_canonical_address,
to_checksum_address,
to_normalized_address,
)
from .applicators import (
apply_formatter_at_index,
apply_formatter_if,
apply_formatter_to_array,
apply_formatters_to_dict,
apply_formatters_to_sequence,
apply_key_map,
apply_one_of_formatters,
combine_argument_formatters,
)
from .conversions import (
hexstr_if_str,
text_if_str,
to_bytes,
to_hex,
to_int,
to_text,
)
from .currency import (
denoms,
from_wei,
to_wei,
)
from .decorators import (
combomethod,
replace_exceptions,
)
from .encoding import (
big_endian_to_int,
int_to_big_endian,
)
from .exceptions import (
ValidationError,
)
from .functional import (
apply_to_return_value,
flatten_return,
reversed_return,
sort_return,
to_dict,
to_list,
to_ordered_dict,
to_set,
to_tuple,
)
from .hexadecimal import (
add_0x_prefix,
decode_hex,
encode_hex,
is_0x_prefixed,
is_hex,
is_hexstr,
remove_0x_prefix,
)
from .humanize import (
humanize_bytes,
humanize_hash,
humanize_integer_sequence,
humanize_ipfs_uri,
humanize_seconds,
humanize_wei,
)
from .logging import (
DEBUG2_LEVEL_NUM,
ExtendedDebugLogger,
HasExtendedDebugLogger,
HasExtendedDebugLoggerMeta,
HasLogger,
HasLoggerMeta,
get_extended_debug_logger,
get_logger,
setup_DEBUG2_logging,
)
from .module_loading import (
import_string,
)
from .numeric import (
clamp,
)
from .types import (
is_boolean,
is_bytes,
is_dict,
is_integer,
is_list,
is_list_like,
is_null,
is_number,
is_string,
is_text,
is_tuple,
)

Some files were not shown because too many files have changed in this diff Show More