Merge pull request #101 from Azulinho/next_release
bugfixes
Azulinho authored Jun 22, 2022
2 parents 7f12e0e + 7ca960c commit c4a11ac
Showing 5 changed files with 64 additions and 27 deletions.
Dockerfile (2 changes: 1 addition & 1 deletion)

@@ -41,10 +41,10 @@ RUN useradd -d /cryptobot -u 1001 -ms /bin/bash cryptobot
 USER cryptobot
 ENV HOME /cryptobot
 WORKDIR /cryptobot
-ADD .python-version .
 RUN curl https://pyenv.run | bash
 ENV PYENV_ROOT="$HOME/.pyenv"
 ENV PATH="$PYENV_ROOT/bin:$PYENV_ROOT/shims/:$PATH"
+ADD .python-version .
 RUN CONFIGURE_OPTS="--enable-shared --fno-semantic-interposition --enable-optimizations --with-lto --with-pgo" pyenv install
 RUN python -m venv /cryptobot/.venv
 ADD requirements.txt .
app.py (29 changes: 26 additions & 3 deletions)

@@ -1501,7 +1501,14 @@ def process_line(self, line: str) -> None:
                 self.tickers[symbol]["KLINES_TREND_PERIOD"],
                 self.tickers[symbol]["KLINES_SLICE_PERCENTAGE_CHANGE"],
             )
-            self.load_klines_for_coin(self.coins[symbol])
+            if not self.load_klines_for_coin(self.coins[symbol]):
+                # got no klines data on this coin, probably delisted
+                # will remove this coin from our ticker list
+                if symbol not in self.wallet:
+                    logging.warning(f"removing {symbol} from tickers")
+                    del self.coins[symbol]
+                    del self.tickers[symbol]
+                return
         else:
             # implements a PAUSE_FOR pause while reading from
             # our price logs.
@@ -1587,7 +1594,7 @@ def backtesting(self) -> None:
 
             f.write(f"{log_entry}\n")
 
-    def load_klines_for_coin(self, coin) -> None:
+    def load_klines_for_coin(self, coin) -> bool:
        """fetches klines for a coin from binance or a local cache"""
 
        # when we initialise a coin, we pull a bunch of klines from binance
@@ -1637,15 +1644,29 @@ def load_klines_for_coin(self, coin) -> None:
             logging.debug(f"trying to read klines from {f_path}")
             with open(f_path, "r") as f:
                 results = json.load(f)
+            # newly listed coins will return an empty array,
+            # so we bail out early here
+            if not results:
+                return True
+
             _, _, high, low, _, _, closetime, _, _, _, _, _ = results[0]
         except Exception:  # pylint: disable=broad-except
             logging.debug(
                 f"calling binance after failed read from {f_path}"
             )
-            results = requests_with_backoff(query).json()
+            response = requests_with_backoff(query)
+            # binance will return a 400 when a coin doesn't exist
+            if response.status_code == 400:
+                logging.warning(f"got a 400 from binance for {symbol}")
+                return False
+
+            results = response.json()
             # this can be fairly API intensive for a large number of tickers
             # so we cache these calls on disk: each coin, period, start day
             # is md5sum'd and stored in a dedicated file under /cache
+            logging.debug(
+                f"writing klines data from binance into {f_path}"
+            )
             if self.mode == "backtesting":
                 with open(f_path, "w") as f:
                     f.write(json.dumps(results))
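
A side note on the on-disk cache described in the comments above: each klines query is reduced to an md5 digest used as the cache filename. A minimal sketch of that keying scheme; the helper name and argument layout are assumptions for illustration, not the project's actual code:

from hashlib import md5

def klines_cache_path(symbol: str, period: str, start_date: str) -> str:
    # hypothetical helper: hash the parameters that identify one binance
    # klines query and use the digest as a dedicated filename under cache/
    key = md5(f"{symbol}-{period}-{start_date}".encode()).hexdigest()
    return f"cache/{key}"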
Expand Down Expand Up @@ -1729,6 +1750,8 @@ def load_klines_for_coin(self, coin) -> None:
logging.debug(f"{symbol} : highest['h']:{coin.highest['h']}")
logging.debug(f"{symbol} : highest['d']:{coin.highest['d']}")

return True

def print_final_balance_report(self):
"""calculates and outputs final balance"""

Expand Down
lib/helpers.py (40 changes: 21 additions & 19 deletions)

@@ -9,6 +9,7 @@
 import requests
 import udatetime
 from binance.client import Client
+from filelock import FileLock
 from tenacity import retry, wait_exponential
 
 
@@ -43,7 +44,7 @@ def c_from_timestamp(date: float) -> datetime:
 
 
 @lru_cache(512)
-@retry(wait=wait_exponential(multiplier=1, max=10))
+@retry(wait=wait_exponential(multiplier=1, max=3))
 def requests_with_backoff(query: str):
     """retry wrapper for requests calls"""
     response = requests.get(query)
@@ -57,34 +58,35 @@ def requests_with_backoff(query: str):
         logging.warning(f"HTTP {status} from binance, sleeping for {backoff}s")
         sleep(backoff)
         response.raise_for_status()
-    response.raise_for_status()
     return response


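Lowering max from 10 to 3 caps the exponential pause between retries at 3 seconds; tenacity's wait_exponential grows the wait roughly as multiplier * 2^n per attempt up to that cap. A minimal sketch of the same decorator pattern, against a hypothetical endpoint:

import requests
from tenacity import retry, wait_exponential

@retry(wait=wait_exponential(multiplier=1, max=3))
def fetch_json(url: str) -> dict:
    # any error surfaced by raise_for_status() re-enters the retry loop,
    # with the wait between attempts capped at 3 seconds
    response = requests.get(url)
    response.raise_for_status()
    return response.json()
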
 @retry(wait=wait_exponential(multiplier=15, max=10))
 def cached_binance_client(access_key: str, secret_key: str) -> Client:
     """retry wrapper for binance client first call"""
 
+    lock = FileLock("state/binance.client.lockfile", timeout=10)
     # when running automated-testing with multiple threads we will hit
     # api request limits; this happens during client initialization,
     # which mostly issues a ping. To avoid this when running multiple
     # processes we cache the client in a pickled state on disk and load
     # it if it already exists.
     cachefile = "cache/binance.client"
-    if exists(cachefile) and (
-        udatetime.now().timestamp() - getctime(cachefile) < (30 * 60)
-    ):
-        logging.debug("re-using local cached binance.client file")
-        with open(cachefile, "rb") as f:
-            _client = pickle.load(f)
-    else:
-        try:
-            logging.debug("refreshing cached binance.client")
-            _client = Client(access_key, secret_key)
-        except Exception as err:
-            logging.warning(f"API client exception: {err}")
-            raise Exception from err
-        with open(cachefile, "wb") as f:
-            pickle.dump(_client, f)
-
-    return _client
+    with lock:
+        if exists(cachefile) and (
+            udatetime.now().timestamp() - getctime(cachefile) < (30 * 60)
+        ):
+            logging.debug("re-using local cached binance.client file")
+            with open(cachefile, "rb") as f:
+                _client = pickle.load(f)
+        else:
+            try:
+                logging.debug("refreshing cached binance.client")
+                _client = Client(access_key, secret_key)
+            except Exception as err:
+                logging.warning(f"API client exception: {err}")
+                raise Exception from err
+            with open(cachefile, "wb") as f:
+                pickle.dump(_client, f)
+
+    return _client
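
The FileLock guard exists so that the many processes spawned during automated backtesting don't all refresh the pickled client at once. A minimal, self-contained sketch of the same lock-then-cache pattern; the function name, lockfile path, and getmtime freshness check are illustrative assumptions, not the project's API:

import pickle
from os.path import exists, getmtime
from time import time

from filelock import FileLock

def load_or_build(cachefile: str, factory, ttl: int = 30 * 60):
    # only one process at a time may inspect or refresh the cache file
    with FileLock(f"{cachefile}.lockfile", timeout=10):
        if exists(cachefile) and time() - getmtime(cachefile) < ttl:
            with open(cachefile, "rb") as f:
                return pickle.load(f)  # fresh enough, reuse it
        obj = factory()  # e.g. lambda: Client(access_key, secret_key)
        with open(cachefile, "wb") as f:
            pickle.dump(obj, f)
        return obj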
requirements.txt (1 change: 1 addition & 0 deletions)

@@ -44,3 +44,4 @@ web-pdb==1.5.7
 websockets==9.1
 xopen==1.2.1
 yarl==1.7.2
+filelock==3.7.1
utils/automated-backtesting.py (19 changes: 15 additions & 4 deletions)

@@ -72,12 +72,13 @@ def split_logs_into_coins(filename, cfg):
     return coinfiles
 
 
-def wrap_subprocessing(config):
+def wrap_subprocessing(config, timeout=None):
     """wraps subprocess call"""
     subprocess.run(
         "python app.py -m backtesting -s tests/fake.yaml "
         + f"-c configs/{config} >results/{config}.txt 2>&1",
         shell=True,
+        timeout=timeout,
     )


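The new timeout parameter works because subprocess.run kills the child and raises subprocess.TimeoutExpired once the limit elapses, and that exception is then re-raised by Future.result() in the submitting process. A small, self-contained sketch of that behaviour; the sleep command is just a stand-in workload:

import subprocess
from concurrent.futures import ProcessPoolExecutor

def run_with_timeout(cmd: str, timeout: float) -> None:
    # raises subprocess.TimeoutExpired if cmd outlives the timeout
    subprocess.run(cmd, shell=True, timeout=timeout)

if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=1) as pool:
        job = pool.submit(run_with_timeout, "sleep 60", 1)
        try:
            job.result()
        except subprocess.TimeoutExpired as excp:
            print(f"timeout while running: {excp}")
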
@@ -133,7 +134,11 @@ def gather_best_results_from_backtesting_log(log, minimum, kind, word, sortby):
                     "coincfg": coincfg,
                 }
             else:
-                if w > coins[coin]["w"]:
+                if w >= coins[coin]["w"]:
+                    # if this run has the same number of wins but a lower
+                    # profit, keep the old one
+                    if w == coins[coin]["w"] and profit < coins[coin]["profit"]:
+                        continue
                     coins[coin] = {
                         "profit": profit,
                         "wls": wls,
@@ -331,13 +336,19 @@ def main():
                 + f"{args.min} on {args.sortby}\n"
             )
             # then we backtest this strategy run against each coin;
+            # occasionally we get stuck runs, so we time out a coin run
+            # at a maximum of 15 minutes
             job = pool.submit(
-                wrap_subprocessing, f"coin.{symbol}.yaml"
+                wrap_subprocessing, f"coin.{symbol}.yaml", 900
             )
             tasks.append(job)
 
         for t in tasks:
-            t.result()
+            try:
+                t.result()
+            except subprocess.TimeoutExpired as excp:
+                print(f"timeout while running: {excp}")
+
 
         # finally we soak up the backtesting.log and generate the best
         # config from all the runs in this strategy