commit 0fab423a18
Author: lz_db
Date: 2025-11-16 12:31:03 +08:00

1451 changed files with 743213 additions and 0 deletions


@@ -0,0 +1 @@
# this file is a stub so that files inside of ccxt/rest are loaded

File diff suppressed because it is too large


@@ -0,0 +1,50 @@
import asyncio
import collections
from time import time
class Throttler:
def __init__(self, config, loop=None):
self.loop = loop
self.config = {
'refillRate': 1.0,
'delay': 0.001,
'cost': 1.0,
'tokens': 0,
'maxCapacity': 2000,
'capacity': 1.0,
}
self.config.update(config)
self.queue = collections.deque()
self.running = False
async def looper(self):
last_timestamp = time() * 1000
while self.running:
future, cost = self.queue[0]
cost = self.config['cost'] if cost is None else cost
if self.config['tokens'] >= 0:
self.config['tokens'] -= cost
if not future.done():
future.set_result(None)
self.queue.popleft()
# context switch
await asyncio.sleep(0)
if len(self.queue) == 0:
self.running = False
else:
await asyncio.sleep(self.config['delay'])
now = time() * 1000
elapsed = now - last_timestamp
last_timestamp = now
self.config['tokens'] = min(self.config['tokens'] + elapsed * self.config['refillRate'], self.config['capacity'])
def __call__(self, cost=None):
future = asyncio.Future()
if len(self.queue) > self.config['maxCapacity']:
raise RuntimeError('throttle queue is over maxCapacity (' + str(int(self.config['maxCapacity'])) + '), see https://docs.ccxt.com/#/README?id=maximum-requests-capacity')
self.queue.append((future, cost))
if not self.running:
self.running = True
asyncio.ensure_future(self.looper(), loop=self.loop)
return future
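
A minimal usage sketch of the Throttler above (the config values here are illustrative, not taken from any exchange): each call enqueues a future that resolves once enough tokens have refilled.

import asyncio

async def main():
    # refillRate is in tokens per millisecond, so 0.002 refills one token every 500 ms
    throttler = Throttler({'refillRate': 0.002, 'cost': 1.0, 'capacity': 1.0})
    for i in range(3):
        await throttler()  # resolves when a token is available
        print('request', i, 'released')

asyncio.run(main())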


@@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
from ccxt.base import errors
# -----------------------------------------------------------------------------
from ccxt.base import decimal_to_precision
from ccxt import BaseError # noqa: F401
from ccxt import ExchangeError # noqa: F401
from ccxt import NotSupported # noqa: F401
from ccxt import AuthenticationError # noqa: F401
from ccxt import PermissionDenied # noqa: F401
from ccxt import AccountSuspended # noqa: F401
from ccxt import InvalidNonce # noqa: F401
from ccxt import InsufficientFunds # noqa: F401
from ccxt import InvalidOrder # noqa: F401
from ccxt import OrderNotFound # noqa: F401
from ccxt import OrderNotCached # noqa: F401
from ccxt import DuplicateOrderId # noqa: F401
from ccxt import CancelPending # noqa: F401
from ccxt import NetworkError # noqa: F401
from ccxt import DDoSProtection # noqa: F401
from ccxt import RateLimitExceeded # noqa: F401
from ccxt import RequestTimeout # noqa: F401
from ccxt import ExchangeNotAvailable # noqa: F401
from ccxt import OnMaintenance # noqa: F401
from ccxt import InvalidAddress # noqa: F401
from ccxt import AddressPending # noqa: F401
from ccxt import ArgumentsRequired # noqa: F401
from ccxt import BadRequest # noqa: F401
from ccxt import BadResponse # noqa: F401
from ccxt import NullResponse # noqa: F401
from ccxt import OrderImmediatelyFillable # noqa: F401
from ccxt import OrderNotFillable # noqa: F401
__all__ = decimal_to_precision.__all__ + errors.__all__ # noqa: F405


@@ -0,0 +1,218 @@
import collections
import logging
logger = logging.getLogger(__name__)
class Delegate:
def __init__(self, name, delegated):
self.name = name
self.delegated = delegated
def __get__(self, instance, owner):
deque = getattr(instance, self.delegated)
return getattr(deque, self.name)
class BaseCache(list):
# implicitly called magic methods don't invoke __getattribute__
# https://docs.python.org/3/reference/datamodel.html#special-method-lookup
# all method lookups obey the descriptor protocol
# this is how the implicit api is defined in ccxt
__iter__ = Delegate('__iter__', '_deque')
__setitem__ = Delegate('__setitem__', '_deque')
__delitem__ = Delegate('__delitem__', '_deque')
__len__ = Delegate('__len__', '_deque')
__contains__ = Delegate('__contains__', '_deque')
__reversed__ = Delegate('__reversed__', '_deque')
clear = Delegate('clear', '_deque')
pop = Delegate('pop', '_deque')
def __init__(self, max_size=None):
super(BaseCache, self).__init__()
self.max_size = max_size
self._deque = collections.deque([], max_size)
def __eq__(self, other):
return list(self) == other
def __repr__(self):
return str(list(self))
def __add__(self, other):
return list(self) + other
def __getitem__(self, item):
# deque doesn't support slicing
deque = super(list, self).__getattribute__('_deque')
if isinstance(item, slice):
start, stop, step = item.indices(len(deque))
return [deque[i] for i in range(start, stop, step)]
else:
return deque[item]
# to be overridden
def getLimit(self, symbol, limit):
pass
# support transpiled snake_case calls
def get_limit(self, symbol, limit):
return self.getLimit(symbol, limit)
class ArrayCache(BaseCache):
def __init__(self, max_size=None):
super(ArrayCache, self).__init__(max_size)
self._nested_new_updates_by_symbol = False
self._new_updates_by_symbol = {}
self._clear_updates_by_symbol = {}
self._all_new_updates = 0
self._clear_all_updates = False
def getLimit(self, symbol, limit):
if symbol is None:
new_updates_value = self._all_new_updates
self._clear_all_updates = True
else:
new_updates_value = self._new_updates_by_symbol.get(symbol)
if new_updates_value is not None and self._nested_new_updates_by_symbol:
new_updates_value = len(new_updates_value)
self._clear_updates_by_symbol[symbol] = True
if new_updates_value is None:
return limit
elif limit is not None:
return min(new_updates_value, limit)
else:
return new_updates_value
def append(self, item):
self._deque.append(item)
if self._clear_all_updates:
self._clear_all_updates = False
self._clear_updates_by_symbol.clear()
self._all_new_updates = 0
self._new_updates_by_symbol.clear()
if self._clear_updates_by_symbol.get(item['symbol']):
self._clear_updates_by_symbol[item['symbol']] = False
self._new_updates_by_symbol[item['symbol']] = 0
self._new_updates_by_symbol[item['symbol']] = self._new_updates_by_symbol.get(item['symbol'], 0) + 1
self._all_new_updates = (self._all_new_updates or 0) + 1
class ArrayCacheByTimestamp(BaseCache):
def __init__(self, max_size=None):
super(ArrayCacheByTimestamp, self).__init__(max_size)
self.hashmap = {}
self._size_tracker = set()
self._new_updates = 0
self._clear_updates = False
def getLimit(self, symbol, limit):
self._clear_updates = True
if limit is None:
return self._new_updates
return min(self._new_updates, limit)
def append(self, item):
if item[0] in self.hashmap:
reference = self.hashmap[item[0]]
if reference != item:
reference[0:len(item)] = item
else:
self.hashmap[item[0]] = item
if len(self._deque) == self._deque.maxlen:
delete_reference = self._deque.popleft()
del self.hashmap[delete_reference[0]]
self._deque.append(item)
if self._clear_updates:
self._clear_updates = False
self._size_tracker.clear()
self._size_tracker.add(item[0])
self._new_updates = len(self._size_tracker)
class ArrayCacheBySymbolById(ArrayCache):
def __init__(self, max_size=None):
super(ArrayCacheBySymbolById, self).__init__(max_size)
self._nested_new_updates_by_symbol = True
self.hashmap = {}
self._index = collections.deque([], max_size)
def append(self, item):
by_id = self.hashmap.setdefault(item['symbol'], {})
if item['id'] in by_id:
reference = by_id[item['id']]
if reference != item:
reference.update(item)
item = reference
index = self._index.index(item['id'])
del self._deque[index]
del self._index[index]
else:
by_id[item['id']] = item
if len(self._deque) == self._deque.maxlen:
delete_item = self._deque.popleft()
self._index.popleft()
try:
del self.hashmap[delete_item['symbol']][delete_item['id']]
except Exception as e:
logger.error(f"Error deleting item from hashmap: {delete_item}. Error:{e}")
self._deque.append(item)
self._index.append(item['id'])
if self._clear_all_updates:
self._clear_all_updates = False
self._clear_updates_by_symbol.clear()
self._all_new_updates = 0
self._new_updates_by_symbol.clear()
if item['symbol'] not in self._new_updates_by_symbol:
self._new_updates_by_symbol[item['symbol']] = set()
if self._clear_updates_by_symbol.get(item['symbol']):
self._clear_updates_by_symbol[item['symbol']] = False
self._new_updates_by_symbol[item['symbol']].clear()
id_set = self._new_updates_by_symbol[item['symbol']]
before_length = len(id_set)
id_set.add(item['id'])
after_length = len(id_set)
self._all_new_updates = (self._all_new_updates or 0) + (after_length - before_length)
class ArrayCacheBySymbolBySide(ArrayCache):
def __init__(self, max_size=None):
super(ArrayCacheBySymbolBySide, self).__init__(max_size)
self._nested_new_updates_by_symbol = True
self.hashmap = {}
self._index = collections.deque([], max_size)
def append(self, item):
by_side = self.hashmap.setdefault(item['symbol'], {})
if item['side'] in by_side:
reference = by_side[item['side']]
if reference != item:
reference.update(item)
item = reference
index = self._index.index(item['symbol'] + item['side'])
del self._deque[index]
del self._index[index]
else:
by_side[item['side']] = item
if len(self._deque) == self._deque.maxlen:
delete_item = self._deque.popleft()
self._index.popleft()
del self.hashmap[delete_item['symbol']][delete_item['side']]
self._deque.append(item)
self._index.append(item['symbol'] + item['side'])
if self._clear_all_updates:
self._clear_all_updates = False
self._clear_updates_by_symbol.clear()
self._all_new_updates = 0
self._new_updates_by_symbol.clear()
if item['symbol'] not in self._new_updates_by_symbol:
self._new_updates_by_symbol[item['symbol']] = set()
if self._clear_updates_by_symbol.get(item['symbol']):
self._clear_updates_by_symbol[item['symbol']] = False
self._new_updates_by_symbol[item['symbol']].clear()
side_set = self._new_updates_by_symbol[item['symbol']]
before_length = len(side_set)
side_set.add(item['side'])
after_length = len(side_set)
self._all_new_updates = (self._all_new_updates or 0) + (after_length - before_length)
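
A short, hypothetical demonstration of the caches above: ArrayCache evicts its oldest entries once max_size is reached, and getLimit reports how many new updates arrived for a symbol since the previous getLimit call.

cache = ArrayCache(2)
cache.append({'symbol': 'BTC/USDT', 'price': 100})
cache.append({'symbol': 'ETH/USDT', 'price': 20})
cache.append({'symbol': 'BTC/USDT', 'price': 101})  # evicts the oldest item
print(list(cache))                       # the two most recent items
print(cache.getLimit('BTC/USDT', None))  # 2: two BTC/USDT updates so far
cache.append({'symbol': 'BTC/USDT', 'price': 102})
print(cache.getLimit('BTC/USDT', None))  # 1: the counter was reset by the previous call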


@@ -0,0 +1,346 @@
# -*- coding: utf-8 -*-
# orjson is an optional speedup, fall back to the standard-library json when it is missing
orjson = None
try:
import orjson
except ImportError:
pass
import json
from asyncio import sleep, ensure_future, wait_for, TimeoutError, BaseEventLoop, Future as asyncioFuture
from .functions import milliseconds, iso8601, deep_extend, is_json_encoded_object
from ccxt import NetworkError, RequestTimeout
from ccxt.async_support.base.ws.future import Future
from ccxt.async_support.base.ws.functions import gunzip, inflate
from typing import Dict
from aiohttp import WSMsgType
class Client(object):
url = None
ws = None
futures: Dict[str, Future] = {}
options = {} # ws-specific options
subscriptions = {}
rejections = {}
on_message_callback = None
on_error_callback = None
on_close_callback = None
on_connected_callback = None
connectionStarted = None
connectionEstablished = None
isConnected = False
connectionTimeout = 10000 # ms, False to disable
connection = None
error = None # low-level networking exception, if any
connected = None # connection-related Future
keepAlive = 5000
heartbeat = True
maxPingPongMisses = 2.0 # how many pongs may be missed before we raise a timeout
lastPong = None
ping = None # ping-function if defined
proxy = None
verbose = False # verbose output
gunzip = False
inflate = False
throttle = None
connecting = False
asyncio_loop: BaseEventLoop = None
ping_looper = None
decompressBinary = True # decompress binary messages by default
def __init__(self, url, on_message_callback, on_error_callback, on_close_callback, on_connected_callback, config={}):
defaults = {
'url': url,
'futures': {},
'subscriptions': {},
'rejections': {},
'on_message_callback': on_message_callback,
'on_error_callback': on_error_callback,
'on_close_callback': on_close_callback,
'on_connected_callback': on_connected_callback,
}
settings = {}
settings.update(defaults)
settings.update(config)
for key in settings:
if hasattr(self, key) and isinstance(getattr(self, key), dict):
setattr(self, key, deep_extend(getattr(self, key), settings[key]))
else:
setattr(self, key, settings[key])
# connection-related Future
self.connected = Future()
def future(self, message_hash):
if message_hash not in self.futures or self.futures[message_hash].cancelled():
self.futures[message_hash] = Future()
future = self.futures[message_hash]
if message_hash in self.rejections:
future.reject(self.rejections[message_hash])
del self.rejections[message_hash]
return future
def reusable_future(self, message_hash):
return self.future(message_hash) # only used in go
def reusableFuture(self, message_hash):
return self.future(message_hash) # only used in go
def resolve(self, result, message_hash):
if self.verbose and message_hash is None:
self.log(iso8601(milliseconds()), 'resolve received None messageHash')
if message_hash in self.futures:
future = self.futures[message_hash]
future.resolve(result)
del self.futures[message_hash]
return result
def reject(self, result, message_hash=None):
if message_hash is not None:
if message_hash in self.futures:
future = self.futures[message_hash]
future.reject(result)
del self.futures[message_hash]
else:
self.rejections[message_hash] = result
else:
message_hashes = list(self.futures.keys())
for message_hash in message_hashes:
self.reject(result, message_hash)
return result
def receive_loop(self):
if self.verbose:
self.log(iso8601(milliseconds()), 'receive loop')
if not self.closed():
# let's drain the aiohttp buffer to avoid latency
if self.buffer and len(self.buffer) > 1:
size_delta = 0
while len(self.buffer) > 1:
message, size = self.buffer.popleft()
size_delta += size
self.handle_message(message)
# we must update the size of the last message inside WebSocketDataQueue:
# self.receive() calls WebSocketDataQueue.read(), which calls WebSocketDataQueue._read_from_buffer(),
# which updates the size of the buffer; otherwise _size overflows and the transport is paused
# (set the environment variable AIOHTTP_NO_EXTENSIONS=Y to check)
# print(self.connection._conn.protocol._payload._size)
self.buffer[0] = (self.buffer[0][0], self.buffer[0][1] + size_delta)
task = self.asyncio_loop.create_task(self.receive())
def after_interrupt(resolved: asyncioFuture):
exception = resolved.exception()
if exception is None:
self.handle_message(resolved.result())
self.asyncio_loop.call_soon(self.receive_loop)
else:
error = NetworkError(str(exception))
if self.verbose:
self.log(iso8601(milliseconds()), 'receive_loop', 'Exception', error)
self.reject(error)
task.add_done_callback(after_interrupt)
else:
# connection got terminated after the connection was made and before the receive loop ran
self.on_close(1006)
async def open(self, session, backoff_delay=0):
# exponential backoff for subsequent reconnection attempts, if necessary
if backoff_delay:
await sleep(backoff_delay)
if self.verbose:
self.log(iso8601(milliseconds()), 'connecting to', self.url, 'with timeout', self.connectionTimeout, 'ms')
self.connectionStarted = milliseconds()
try:
coroutine = self.create_connection(session)
self.connection = await wait_for(coroutine, timeout=int(self.connectionTimeout / 1000))
self.connecting = False
self.connectionEstablished = milliseconds()
self.isConnected = True
if self.verbose:
self.log(iso8601(milliseconds()), 'connected')
self.connected.resolve(self.url)
self.on_connected_callback(self)
# run both loops forever
self.ping_looper = ensure_future(self.ping_loop(), loop=self.asyncio_loop)
self.asyncio_loop.call_soon(self.receive_loop)
except TimeoutError:
# connection timeout
error = RequestTimeout('Connection timeout')
if self.verbose:
self.log(iso8601(milliseconds()), 'RequestTimeout', error)
self.on_error(error)
except Exception as e:
# connection failed or rejected (ConnectionRefusedError, ClientConnectorError)
error = NetworkError(e)
if self.verbose:
self.log(iso8601(milliseconds()), 'NetworkError', error)
self.on_error(error)
@property
def buffer(self):
# aiohttp exposes the websocket buffer in its C reader:
# https://github.com/aio-libs/aiohttp/blob/master/aiohttp/_websocket/reader_c.pxd#L53C24-L53C31
# which means we can read from it directly here
# the None-checks below guard against "AttributeError: 'NoneType' object has no attribute '_buffer'"
# that would otherwise be raised upon getting an error message
if self.connection is None:
return None
if self.connection._conn is None:
return None
if self.connection._conn.protocol is None:
return None
if self.connection._conn.protocol._payload is None:
return None
return self.connection._conn.protocol._payload._buffer
def connect(self, session, backoff_delay=0):
if not self.connection and not self.connecting:
self.connecting = True
ensure_future(self.open(session, backoff_delay), loop=self.asyncio_loop)
return self.connected
def on_error(self, error):
if self.verbose:
self.log(iso8601(milliseconds()), 'on_error', error)
self.error = error
self.reject(error)
self.on_error_callback(self, error)
if not self.closed():
ensure_future(self.close(1006), loop=self.asyncio_loop)
def on_close(self, code):
if self.verbose:
self.log(iso8601(milliseconds()), 'on_close', code)
if not self.error:
self.reject(NetworkError('Connection closed by remote server, closing code ' + str(code)))
self.on_close_callback(self, code)
ensure_future(self.aiohttp_close(), loop=self.asyncio_loop)
def log(self, *args):
print(*args)
def closed(self):
return (self.connection is None) or self.connection.closed
def receive(self):
return self.connection.receive()
# helper method for binary and text messages
def handle_text_or_binary_message(self, data):
if self.verbose:
self.log(iso8601(milliseconds()), 'message', data)
if isinstance(data, bytes):
if self.decompressBinary:
data = data.decode()
decoded = None
if is_json_encoded_object(data):
# prefer orjson for speed when it is installed, otherwise use the standard library
if orjson is None:
decoded = json.loads(data)
else:
decoded = orjson.loads(data)
else:
decoded = data
self.on_message_callback(self, decoded)
def handle_message(self, message):
# self.log(iso8601(milliseconds()), message)
if message.type == WSMsgType.TEXT:
self.handle_text_or_binary_message(message.data)
elif message.type == WSMsgType.BINARY:
data = message.data
if self.gunzip:
data = gunzip(data)
elif self.inflate:
data = inflate(data)
self.handle_text_or_binary_message(data)
# autoping is responsible for automatically replying with pong
# to a ping incoming from a server, we have to disable autoping
# with aiohttp's websockets and respond with pong manually
# otherwise aiohttp's websockets client won't trigger WSMsgType.PONG
elif message.type == WSMsgType.PING:
if self.verbose:
self.log(iso8601(milliseconds()), 'ping', message)
ensure_future(self.connection.pong(message.data), loop=self.asyncio_loop)
elif message.type == WSMsgType.PONG:
self.lastPong = milliseconds()
if self.verbose:
self.log(iso8601(milliseconds()), 'pong', message)
elif message.type == WSMsgType.CLOSE:
if self.verbose:
self.log(iso8601(milliseconds()), 'close', self.closed(), message)
self.on_close(message.data)
elif message.type == WSMsgType.ERROR:
if self.verbose:
self.log(iso8601(milliseconds()), 'error', message)
error = NetworkError(str(message))
self.on_error(error)
def create_connection(self, session):
# autoping is responsible for automatically replying with pong
# to a ping incoming from a server, we have to disable autoping
# with aiohttp's websockets and respond with pong manually
# otherwise aiohttp's websockets client won't trigger WSMsgType.PONG
# call __aenter__ here to simulate "async with", otherwise we get
# the error "await not called with future" when connecting to a non-existent endpoint
if (self.proxy):
return session.ws_connect(self.url, autoping=False, autoclose=False, headers=self.options.get('headers'), proxy=self.proxy, max_msg_size=10485760).__aenter__()
return session.ws_connect(self.url, autoping=False, autoclose=False, headers=self.options.get('headers'), max_msg_size=10485760).__aenter__()
async def send(self, message):
if self.verbose:
self.log(iso8601(milliseconds()), 'sending', message)
send_msg = None
if isinstance(message, str):
send_msg = message
else:
if orjson is None:
send_msg = json.dumps(message, separators=(',', ':'))
else:
send_msg = orjson.dumps(message).decode('utf-8')
if self.closed():
raise ConnectionError('Cannot Send Message: Connection closed before send')
return await self.connection.send_str(send_msg)
async def close(self, code=1000):
if self.verbose:
self.log(iso8601(milliseconds()), 'closing', code)
for future in self.futures.values():
future.cancel()
await self.aiohttp_close()
async def aiohttp_close(self):
if not self.closed():
await self.connection.close()
# these will end automatically once self.closed() returns True
# so we don't need to cancel them
if self.ping_looper:
self.ping_looper.cancel()
async def ping_loop(self):
if self.verbose:
self.log(iso8601(milliseconds()), 'ping loop')
while self.keepAlive and not self.closed():
now = milliseconds()
self.lastPong = now if self.lastPong is None else self.lastPong
if (self.lastPong + self.keepAlive * self.maxPingPongMisses) < now:
self.on_error(RequestTimeout('Connection to ' + self.url + ' timed out due to a ping-pong keepalive missing on time'))
# the following ping-clause is not necessary with aiohttp's built-in ws
# since it has a heartbeat option (see create_connection above)
# however some exchanges require a text-type ping message
# therefore we need this clause anyway
else:
if self.ping:
try:
await self.send(self.ping(self))
except Exception as e:
self.on_error(e)
else:
await self.connection.ping()
await sleep(self.keepAlive / 1000)
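
A rough connection sketch for the Client above; the echo-server URL is an assumption for illustration (in ccxt.pro the exchange classes wire these callbacks internally), so treat this as the shape of the API rather than a supported entry point.

import asyncio
import aiohttp

def on_message(client, message):
    print('message:', message)

def on_error(client, error):
    print('error:', error)

def on_close(client, code):
    print('closed with code', code)

def on_connected(client):
    print('connected to', client.url)

async def main():
    async with aiohttp.ClientSession() as session:
        client = Client(
            'wss://echo.websocket.org',  # hypothetical echo endpoint
            on_message, on_error, on_close, on_connected,
            {'asyncio_loop': asyncio.get_running_loop(), 'verbose': True},
        )
        await client.connect(session)       # resolves once the websocket is open
        await client.send({'hello': 'world'})
        await asyncio.sleep(2)              # give the echo a moment to arrive
        await client.close()

asyncio.run(main())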


@@ -0,0 +1,59 @@
# -*- coding: utf-8 -*-
from zlib import decompress, MAX_WBITS
from base64 import b64decode
from gzip import GzipFile
from io import BytesIO
import time
import datetime
def inflate(data):
return decompress(data, -MAX_WBITS)
def inflate64(data):
return inflate(b64decode(data))
def gunzip(data):
return GzipFile('', 'rb', 9, BytesIO(data)).read().decode('utf-8')
# Temporary: the methods below are duplicated here to avoid circular imports between exchange.py and aiohttp.py
def milliseconds():
return int(time.time() * 1000)
def iso8601(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, int):
return None
if int(timestamp) < 0:
return None
try:
utc = datetime.datetime.fromtimestamp(timestamp // 1000, datetime.timezone.utc)
return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + "{:03d}".format(int(timestamp) % 1000) + 'Z'
except (TypeError, OverflowError, OSError):
return None
def is_json_encoded_object(input):
return (isinstance(input, str) and
(len(input) >= 2) and
((input[0] == '{') or (input[0] == '[')))
def deep_extend(*args):
result = None
for arg in args:
if isinstance(arg, dict):
if not isinstance(result, dict):
result = {}
for key in arg:
result[key] = deep_extend(result[key] if key in result else None, arg[key])
else:
result = arg
return result
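
For example, deep_extend merges nested dicts left-to-right with later arguments winning, while any non-dict argument simply replaces the accumulated result:

merged = deep_extend({'a': {'b': 1}}, {'a': {'c': 2}}, {'d': 3})
print(merged)                       # {'a': {'b': 1, 'c': 2}, 'd': 3}
print(deep_extend({'a': 1}, None))  # None: a non-dict argument replaces the result
print(iso8601(milliseconds()))      # e.g. '2025-11-16T04:31:03.000Z'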


@@ -0,0 +1,46 @@
import asyncio
# Test by running:
# - python python/ccxt/pro/test/base/test_close.py
# - python python/ccxt/pro/test/base/test_future.py
class Future(asyncio.Future):
def resolve(self, result=None):
if not self.done():
self.set_result(result)
def reject(self, error=None):
if not self.done():
self.set_exception(error)
@classmethod
def race(cls, futures):
future = Future()
coro = asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
task = asyncio.create_task(coro)
def callback(done):
complete, _ = done.result()
# check for exceptions
exceptions = []
cancelled = False
for f in complete:
if f.cancelled():
cancelled = True
else:
err = f.exception()
if err:
exceptions.append(err)
# if any future raised, reject with the first exception
if future.cancelled():
return
if len(exceptions) > 0:
future.set_exception(exceptions[0])
# else return first result
elif cancelled:
future.cancel()
else:
first_result = list(complete)[0].result()
future.set_result(first_result)
task.add_done_callback(callback)
return future
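
A quick sketch of Future.race with two of the futures defined above: the returned future settles with the outcome of whichever input settles first.

import asyncio

async def demo():
    fast, slow = Future(), Future()
    loop = asyncio.get_running_loop()
    loop.call_later(0.01, fast.resolve, 'fast wins')
    winner = await Future.race([fast, slow])
    print(winner)   # 'fast wins'
    slow.cancel()   # clean up the pending loser

asyncio.run(demo())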


@@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-
from ccxt.async_support.base.ws import order_book_side
from ccxt import Exchange
import sys
class OrderBook(dict):
def __init__(self, snapshot={}, depth=None):
self.cache = []
depth = depth or sys.maxsize
defaults = {
'bids': [],
'asks': [],
'timestamp': None,
'datetime': None,
'nonce': None,
'symbol': None,
}
# do not mutate snapshot
defaults.update(snapshot)
if not isinstance(defaults['asks'], order_book_side.OrderBookSide):
defaults['asks'] = order_book_side.Asks(defaults['asks'], depth)
if not isinstance(defaults['bids'], order_book_side.OrderBookSide):
defaults['bids'] = order_book_side.Bids(defaults['bids'], depth)
defaults['datetime'] = Exchange.iso8601(defaults.get('timestamp'))
# merge to self
super(OrderBook, self).__init__(defaults)
def limit(self):
self['asks'].limit()
self['bids'].limit()
return self
def reset(self, snapshot={}):
self['asks']._index.clear()
self['asks'].clear()
for ask in snapshot.get('asks', []):
self['asks'].storeArray(ask)
self['bids']._index.clear()
self['bids'].clear()
for bid in snapshot.get('bids', []):
self['bids'].storeArray(bid)
self['nonce'] = snapshot.get('nonce')
self['timestamp'] = snapshot.get('timestamp')
self['datetime'] = Exchange.iso8601(self['timestamp'])
self['symbol'] = snapshot.get('symbol')
def update(self, snapshot):
nonce = snapshot.get('nonce')
if nonce is not None and self['nonce'] is not None and nonce < self['nonce']:
return self
self.reset(snapshot)
# -----------------------------------------------------------------------------
# overwrites absolute volumes at price levels
# or deletes price levels based on order counts (3rd value in a bidask delta)
class CountedOrderBook(OrderBook):
def __init__(self, snapshot={}, depth=None):
copy = Exchange.extend(snapshot, {
'asks': order_book_side.CountedAsks(snapshot.get('asks', []), depth),
'bids': order_book_side.CountedBids(snapshot.get('bids', []), depth),
})
super(CountedOrderBook, self).__init__(copy, depth)
# -----------------------------------------------------------------------------
# indexed by order ids (3rd value in a bidask delta)
class IndexedOrderBook(OrderBook):
def __init__(self, snapshot={}, depth=None):
copy = Exchange.extend(snapshot, {
'asks': order_book_side.IndexedAsks(snapshot.get('asks', []), depth),
'bids': order_book_side.IndexedBids(snapshot.get('bids', []), depth),
})
super(IndexedOrderBook, self).__init__(copy, depth)
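
A small sketch of how these books behave (prices and sizes are made up): bids keep the best bid first in descending order, asks ascend, and limit() trims both sides to the requested depth.

ob = OrderBook({'symbol': 'BTC/USDT'}, depth=5)
ob['bids'].store(100.0, 2.0)
ob['bids'].store(101.0, 1.0)
ob['asks'].store(102.0, 3.0)
ob.limit()
print(ob['bids'])  # [[101.0, 1.0], [100.0, 2.0]] -- best bid first
print(ob['asks'])  # [[102.0, 3.0]]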


@@ -0,0 +1,174 @@
# -*- coding: utf-8 -*-
import sys
import bisect
"""Author: Carlo Revelli"""
"""Fast bisect bindings"""
"""https://github.com/python/cpython/blob/master/Modules/_bisectmodule.c"""
"""Performs a binary search when inserting keys in sorted order"""
class OrderBookSide(list):
side = None # set to True for bids and False for asks
def __init__(self, deltas=[], depth=None):
super(OrderBookSide, self).__init__()
self._depth = depth or sys.maxsize
self._n = sys.maxsize
# parallel to self
self._index = []
for delta in deltas:
self.storeArray(list(delta))
def store_array(self, delta):
return self.storeArray(delta)
def storeArray(self, delta):
price = delta[0]
size = delta[1]
index_price = -price if self.side else price
index = bisect.bisect_left(self._index, index_price)
if size:
if index < len(self._index) and self._index[index] == index_price:
self[index][1] = size
else:
self._index.insert(index, index_price)
self.insert(index, delta)
elif index < len(self._index) and self._index[index] == index_price:
del self._index[index]
del self[index]
def store(self, price, size):
self.storeArray([price, size])
def limit(self):
difference = len(self) - self._depth
for _ in range(difference):
self.remove_index(self.pop())
self._index.pop()
def remove_index(self, order):
pass
def __len__(self):
length = super(OrderBookSide, self).__len__()
return min(length, self._n)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = item.indices(len(self))
return [self[i] for i in range(start, stop, step)]
else:
return super(OrderBookSide, self).__getitem__(item)
def __eq__(self, other):
if isinstance(other, list):
return list(self) == other
return super(OrderBookSide, self).__eq__(other)
def __repr__(self):
return str(list(self))
# -----------------------------------------------------------------------------
# overwrites absolute volumes at price levels
# or deletes price levels based on order counts (3rd value in a bidask delta)
# this class stores vector arrays of values indexed by price
class CountedOrderBookSide(OrderBookSide):
def __init__(self, deltas=[], depth=None):
super(CountedOrderBookSide, self).__init__(deltas, depth)
def storeArray(self, delta):
price = delta[0]
size = delta[1]
count = delta[2]
index_price = -price if self.side else price
index = bisect.bisect_left(self._index, index_price)
if size and count:
if index < len(self._index) and self._index[index] == index_price:
self[index][1] = size
self[index][2] = count
else:
self._index.insert(index, index_price)
self.insert(index, delta)
elif index < len(self._index) and self._index[index] == index_price:
del self._index[index]
del self[index]
def store(self, price, size, count):
self.storeArray([price, size, count])
# -----------------------------------------------------------------------------
# indexed by order ids (3rd value in a bidask delta)
class IndexedOrderBookSide(OrderBookSide):
def __init__(self, deltas=[], depth=None):
self._hashmap = {}
super(IndexedOrderBookSide, self).__init__(deltas, depth)
def storeArray(self, delta):
price = delta[0]
if price is not None:
index_price = -price if self.side else price
else:
index_price = None
size = delta[1]
order_id = delta[2]
if size:
if order_id in self._hashmap:
old_price = self._hashmap[order_id]
index_price = index_price or old_price
# in case the price is not defined
delta[0] = abs(index_price)
# matches if price is not defined or if price matches
if index_price == old_price:
# just overwrite the old index
index = bisect.bisect_left(self._index, index_price)
while self[index][2] != order_id:
index += 1
self._index[index] = index_price
self[index] = delta
return
else:
# remove old price level
old_index = bisect.bisect_left(self._index, old_price)
while self[old_index][2] != order_id:
old_index += 1
del self._index[old_index]
del self[old_index]
# insert new price level
self._hashmap[order_id] = index_price
index = bisect.bisect_left(self._index, index_price)
while index < len(self._index) and self._index[index] == index_price and self[index][2] < order_id:
index += 1
self._index.insert(index, index_price)
self.insert(index, delta)
elif order_id in self._hashmap:
old_price = self._hashmap[order_id]
index = bisect.bisect_left(self._index, old_price)
while self[index][2] != order_id:
index += 1
del self._index[index]
del self[index]
del self._hashmap[order_id]
def remove_index(self, order):
order_id = order[2]
if order_id in self._hashmap:
del self._hashmap[order_id]
def store(self, price, size, order_id):
self.storeArray([price, size, order_id])
# -----------------------------------------------------------------------------
# a more elegant syntax is possible here, but native inheritance is portable
class Asks(OrderBookSide): side = False # noqa
class Bids(OrderBookSide): side = True # noqa
class CountedAsks(CountedOrderBookSide): side = False # noqa
class CountedBids(CountedOrderBookSide): side = True # noqa
class IndexedAsks(IndexedOrderBookSide): side = False # noqa
class IndexedBids(IndexedOrderBookSide): side = True # noqa
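
To illustrate the delta semantics with made-up numbers: a zero size deletes a price level in the plain book, while the indexed book keys every update by order id (the third element of each delta).

asks = Asks()
asks.storeArray([100.0, 5.0])
asks.storeArray([99.5, 2.0])
asks.storeArray([100.0, 0.0])  # zero size deletes the price level
print(asks)                    # [[99.5, 2.0]]

idx = IndexedAsks()
idx.store(100.0, 1.0, 'order-1')
idx.store(100.0, 2.0, 'order-2')
idx.store(None, 3.0, 'order-1')   # price omitted: resize an existing order by id
idx.store(100.0, 0.0, 'order-2')  # zero size removes the order
print(idx)                        # [[100.0, 3.0, 'order-1']]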