Batch of changes, phew
64 testy/archive/alpacasnapshot.py Normal file
@@ -0,0 +1,64 @@
# 2 clients for historical data: StockHistoricalDataClient (needs keys), CryptoHistoricalDataClient
# 2 clients for real-time data: CryptoDataStream, StockDataStream

# import the required client
from alpaca.data.historical import StockHistoricalDataClient, CryptoHistoricalDataClient

# when using historical data (i.e. REST), import the corresponding request object
from alpaca.data.requests import StockLatestQuoteRequest, StockBarsRequest, StockTradesRequest, StockSnapshotRequest

# objects we then work with (part of the package above, listed here for reference)
from alpaca.data import Quote, Trade, Snapshot, Bar
from alpaca.data.models import BarSet, QuoteSet, TradeSet
from alpaca.data.timeframe import TimeFrame, TimeFrameUnit
from v2realbot.utils.utils import zoneNY
from v2realbot.config import ACCOUNT1_PAPER_API_KEY, ACCOUNT1_PAPER_SECRET_KEY
from config import API_KEY, SECRET_KEY
from alpaca.data.enums import DataFeed
from datetime import datetime, timedelta
import pandas as pd
from rich import print
from collections import defaultdict
from pandas import to_datetime
from msgpack.ext import Timestamp
from v2realbot.utils.historicals import convert_daily_bars


def get_last_close():
    pass


def get_todays_open():
    pass


## returns historical bars in our internal format
def get_historical_bars(symbol: str, time_from: datetime, time_to: datetime, timeframe: TimeFrame):
    stock_client = StockHistoricalDataClient(ACCOUNT1_PAPER_API_KEY, ACCOUNT1_PAPER_SECRET_KEY, raw_data=True)
    bar_request = StockBarsRequest(symbol_or_symbols=symbol, timeframe=timeframe, start=time_from, end=time_to, feed=DataFeed.SIP)
    bars: BarSet = stock_client.get_stock_bars(bar_request)
    print("original bars", bars["BAC"])
    print(bars)
    return convert_daily_bars(bars[symbol])


# in init we fill the requested historical data into historicals[]
# hardcoded for now
# last 30 days of bars

# get 30 days
time_to = datetime.now(tz=zoneNY)
time_from = time_to - timedelta(days=2)

bary = get_historical_bars("BAC", time_from, time_to, TimeFrame.Hour)
print(bary)
historicals = defaultdict(dict)
historicals["30"] = bary
print(historicals)

# stock_client = StockHistoricalDataClient(ACCOUNT1_PAPER_API_KEY, ACCOUNT1_PAPER_SECRET_KEY, raw_data=True)
# snapshotRequest = StockSnapshotRequest(symbol_or_symbols=["BAC"], feed="sip")
# snapshotResponse = stock_client.get_stock_snapshot(snapshotRequest)
# print("snapshot", snapshotResponse)
# snapshotResponse["BAC"]["dailyBar"]
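# get_last_close and get_todays_open are left as stubs above. A minimal sketch (not in the
# commit) of how they might be filled in via the commented-out snapshot request; the raw
# payload key names ("prevDailyBar", "dailyBar", "c", "o") are assumptions about the
# raw_data=True response, not confirmed API facts:
def get_last_close_sketch(symbol: str = "BAC") -> float:
    client = StockHistoricalDataClient(ACCOUNT1_PAPER_API_KEY, ACCOUNT1_PAPER_SECRET_KEY, raw_data=True)
    snap = client.get_stock_snapshot(StockSnapshotRequest(symbol_or_symbols=[symbol], feed="sip"))
    return snap[symbol]["prevDailyBar"]["c"]   # yesterday's close (assumed key)


def get_todays_open_sketch(symbol: str = "BAC") -> float:
    client = StockHistoricalDataClient(ACCOUNT1_PAPER_API_KEY, ACCOUNT1_PAPER_SECRET_KEY, raw_data=True)
    snap = client.get_stock_snapshot(StockSnapshotRequest(symbol_or_symbols=[symbol], feed="sip"))
    return snap[symbol]["dailyBar"]["o"]       # today's regular-session open (assumed key)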
@@ -15,3 +15,6 @@ y_interp = spi.interp1d(x, y)
#create plot of x vs. y
#plt.plot(x, y, '-ob')

+
+
+
5 testy/archive/npinterpol.py Normal file
@@ -0,0 +1,5 @@
import numpy as np

atr10 = 11.1
print(np.interp(atr10, [1, 10, 11, 12], [0, 1, 100, 1]))
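# np.interp does piecewise-linear interpolation over the given breakpoints, so the call
# above lands on the segment between (11, 100) and (12, 1). A quick self-contained check
# (values chosen only to verify the arithmetic):
import numpy as np

# 11.1 sits 10% of the way from x=11 to x=12, so the result is 100 + 0.1 * (1 - 100) = 90.1
assert np.isclose(np.interp(11.1, [1, 10, 11, 12], [0, 1, 100, 1]), 90.1)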
22 testy/fourier.py Normal file
@@ -0,0 +1,22 @@
import numpy as np
import scipy.fft as fft

# NOTE: `prices` is expected to be defined upstream (e.g. a list of close prices)
time_series = np.array(prices)
n = len(time_series)

# Compute the Fourier transform
yf = fft.fft(time_series)
xf = np.linspace(0.0, 1.0 / 2.0, n // 2)
# Compute the Fourier transform (magnitudes)
yf = np.abs(fft.fft(time_series))

# Find the corresponding frequencies
frequencies = xf

# Find the corresponding amplitudes
amplitudes = 2.0 / n * np.abs(yf[:n // 2])

# Interpret the amplitudes and frequencies
for freq, ampl in zip(frequencies, amplitudes):
    print(f"Frequency: {freq}, Amplitude: {ampl}")
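# A self-contained variant of the script above on synthetic data (not in the commit; the
# sine period and noise level are arbitrary, chosen only so a dominant cycle is visible):
import numpy as np
import scipy.fft as fft

rng = np.random.default_rng(0)
n = 256
t = np.arange(n)
prices = 100 + 5.0 * np.sin(2 * np.pi * t / 32) + rng.normal(0, 0.5, n)   # 32-bar cycle plus noise

spectrum = np.abs(fft.fft(prices - prices.mean()))   # remove the mean so the DC bin does not dominate
freqs = np.fft.fftfreq(n, d=1.0)[: n // 2]           # cycles per bar
amplitudes = 2.0 / n * spectrum[: n // 2]

k = np.argmax(amplitudes[1:]) + 1                    # skip the zero-frequency bin
print(f"dominant cycle ~ {1 / freqs[k]:.1f} bars")   # expected ~ 32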
106 testy/localmaximatest.py Normal file
@@ -0,0 +1,106 @@
import numpy as np
import matplotlib.pyplot as plt
from v2realbot.controller.services import get_archived_runner_details_byID
from v2realbot.common.model import RunArchiveDetail
from scipy.signal import argrelextrema

id = "c5ae757f-6bdd-4d1f-84a8-98bdaad65a28"

res, val = get_archived_runner_details_byID(id)
if res < 0:
    print(res)

detail = RunArchiveDetail(**val)
# detail.indicators[0]
price_series = np.array(detail.bars["vwap"])
#price_series = detail.bars["vwap"]
timestamps = detail.bars["time"]

prices = []
# TODO: add a convert-to-numpy option to indicators so numpy operations can be used in expressions


def get_local_maxima_numpy(
    series: np.ndarray,
    debug=False,
) -> np.ndarray:
    """calculate local maxima points"""
    if series.size == 0:
        return np.array([])

    # Calculate the difference between adjacent elements.
    diff = np.diff(series)

    # Find the indices of the elements where the difference changes sign from positive to negative.
    high_index = np.where((diff[:-1] >= 0) & (diff[1:] < 0))[0] + 1

    # Return a NumPy array containing the indices of the local maxima.
    return high_index  #series[high_index]


def get_local_minima_numpy(
    series: np.ndarray,
    debug=False,
) -> np.ndarray:
    """calculate local minima points"""
    if series.size == 0:
        return np.array([])

    # Calculate the difference between adjacent elements.
    diff = np.diff(series)

    # Find the indices of the elements where the difference changes sign from negative to positive.
    low_index = np.where((diff[:-1] <= 0) & (diff[1:] > 0))[0] + 1

    # Return a NumPy array containing the indices of the local minima.
    return low_index  #series[low_index]


def get_local_minima(prices):
    return prices[-2] if len(prices) >= 3 and prices[-2] > prices[-3] and prices[-2] > prices[-1] else None

# iter_prices = []
# for price in detail.bars["vwap"]:
#     iter_prices.append(price)
#     get_local_minima(iter_prices)


def calculate_support_resistance(bars, window=5):
    lows = np.array(bars['low'])
    highs = np.array(bars['high'])

    rolling_support = np.minimum.accumulate(lows)[::-1][:window][::-1]
    rolling_resistance = np.maximum.accumulate(highs)[::-1][:window][::-1]

    return {'rolling_support': rolling_support.tolist(), 'rolling_resistance': rolling_resistance.tolist()}


rolling = calculate_support_resistance(detail.bars, 5)
print(rolling)

# func = "prices[-1] if np.all(prices[-1] > prices[-2:]) else 0"
# #func = "prices[-2] if len(prices) >= 3 and prices[-2] > prices[-3] and prices[-2] > prices[-1] else None"
# for price in price_series:
#     prices.append(price)
#     print(eval(func))
# maxima_indices = argrelextrema(price_series, np.greater)[0]
# minima_indices = argrelextrema(price_series, np.less)[0]
# # Print the indices of local maxima and minima
# print("Local Maxima Indices:", maxima_indices)
# print("Local Minima Indices:", minima_indices)

print("from new function")
maxima_indices = get_local_maxima_numpy(price_series)
minima_indices = get_local_minima_numpy(price_series)
print("Local Maxima Indices:", maxima_indices)
print("Local Minima Indices:", minima_indices)

# Plot the price series with local maxima and minima
plt.figure(figsize=(10, 6))
plt.plot(range(len(price_series)), price_series, label='Price Series')
plt.scatter(maxima_indices, price_series[maxima_indices], color='r', label='Local Maxima', zorder=5)
plt.scatter(minima_indices, price_series[minima_indices], color='g', label='Local Minima', zorder=5)
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Price Series with Local Maxima and Minima')
plt.legend()
plt.show()
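# A quick self-contained check (toy data, not from the runner archive) of the diff-sign
# rule that get_local_maxima_numpy uses above:
import numpy as np

def local_maxima_indices(series: np.ndarray) -> np.ndarray:
    # a point is a local maximum where the slope turns from non-negative to negative
    if series.size < 3:
        return np.array([], dtype=int)
    diff = np.diff(series)
    return np.where((diff[:-1] >= 0) & (diff[1:] < 0))[0] + 1

toy = np.array([1.0, 2.0, 3.0, 2.5, 2.8, 2.0, 2.0, 2.4])
print(local_maxima_indices(toy))   # -> [2 4]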
74 testy/scaffoldings/custom_ind.py Normal file
@@ -0,0 +1,74 @@
import numpy as np
import matplotlib.pyplot as plt
from v2realbot.controller.services import get_archived_runner_details_byID, preview_indicator_byTOML
from v2realbot.common.model import RunArchiveDetail, InstantIndicator
from scipy.signal import argrelextrema
from v2realbot.utils.utils import AttributeDict, zoneNY, zonePRG, safe_get, dict_replace_value, Store, parse_toml_string, json_serial, is_open_hours, send_to_telegram

## SCAFFOLDING for development of a new indicator

runner_id = "7512b097-1f29-4c61-a331-2b1a40fd3f91"

toml = """
#[stratvars.indicators.local_maxik]
type = 'custom'
subtype = 'basestats'
on_confirmed_only = true
cp.lookback = 30
cp.source = 'vwap'
cp.function = 'maxima'
"""

toml = """
type = 'custom'
subtype = 'expression'
on_confirmed_only = true
cp.expression = 'int(utls.is_pivot(high,3))'
"""

toml = """
type = 'custom'
subtype = 'expression'
on_confirmed_only = true
cp.expression = 'int(utls.is_pivot(high,3))'
"""

res, val = get_archived_runner_details_byID(runner_id)
if res < 0:
    print("error fetching runner")
    print(res)

detail = RunArchiveDetail(**val)

res, toml_parsed = parse_toml_string(toml)
if res < 0:
    print("invalid toml", res, toml)
print(toml_parsed)
#toml_parsed = AttributeDict(**toml_parsed)
# for i in toml_parsed["stratvars"]["indicators"]:
#     break

ind: InstantIndicator = InstantIndicator(name="testind", toml=toml)

result, new_ind_values = preview_indicator_byTOML(id=runner_id, indicator=ind)
if result < 0:
    print("error", result, val)

# detail.indicators[0]
price_series = np.array(detail.bars["vwap"])
new_ind_value = np.array(new_ind_values)
#price_series = detail.bars["vwap"]
#timestamps = detail.bars["time"]

# Plot the price series together with the previewed indicator
plt.figure(figsize=(10, 6))
plt.plot(range(len(price_series)), price_series, label='Price')
plt.plot(range(len(new_ind_value)), new_ind_value, label='Indicator')
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Price Series with Local Maxima and Minima')
plt.legend()
plt.show()
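# The TOML snippets above use dotted keys (cp.expression); parsed, they become a nested
# dict. A minimal illustration with the standard-library parser (the project itself uses
# its own parse_toml_string helper; this only shows the resulting shape):
import tomllib  # Python 3.11+

example = """
type = 'custom'
subtype = 'expression'
on_confirmed_only = true
cp.expression = 'int(utls.is_pivot(high,3))'
"""
print(tomllib.loads(example))
# {'type': 'custom', 'subtype': 'expression', 'on_confirmed_only': True,
#  'cp': {'expression': 'int(utls.is_pivot(high,3))'}}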
19 testy/templatetest.py Normal file
@@ -0,0 +1,19 @@
import numpy as np
import matplotlib.pyplot as plt
from v2realbot.controller.services import get_archived_runner_details_byID
from v2realbot.common.model import RunArchiveDetail

# Generate sample price data
timestamps = np.arange('2023-10-27', '2023-10-28', dtype='datetime64[s]')
price = 100 + np.arange(100) * 0.5

id = "e74b5d35-6552-4dfc-ba59-2eda215af292"

res, val = get_archived_runner_details_byID(id)
if res < 0:
    print(res)

detail = RunArchiveDetail(**val)
# detail.indicators[0]
price = detail.bars["vwap"]
timestamps = detail.bars["time"]
124 testy/testsuppressmedium.py Normal file
@@ -0,0 +1,124 @@
import numpy as np
import matplotlib.pyplot as plt
from v2realbot.controller.services import get_archived_runner_details_byID
from v2realbot.common.model import RunArchiveDetail
from scipy.signal import argrelextrema
import mplfinance

id = "e74b5d35-6552-4dfc-ba59-2eda215af292"

res, val = get_archived_runner_details_byID(id)
if res < 0:
    print(res)

detail = RunArchiveDetail(**val)
# detail.indicators[0]
price_series = np.array(detail.bars["vwap"])
df = {}
highs = np.array(detail.bars["high"])
lows = np.array(detail.bars["low"])

np_high = np.array(detail.bars["high"])
np_low = np.array(detail.bars["low"])
price_series = detail.bars["vwap"]
timestamps = detail.bars["time"]

prices = []
# TODO: add a convert-to-numpy option to indicators so numpy operations can be used in expressions

# func = "prices[-1] if np.all(prices[-1] > prices[-2:]) else 0"
# #func = "prices[-2] if len(prices) >= 3 and prices[-2] > prices[-3] and prices[-2] > prices[-1] else None"
# for price in price_series:
#     prices.append(price)
#     print(eval(func))


class Sup_Res_Finder():
    def __init__(self, s=None):
        if s is None:
            # NOTE: np.highs does not exist, so this default branch is broken; it is unused
            # here because an explicit tolerance s is passed below - kept as in the original
            self.s = np.mean(np.diff(np.concatenate([[np.nan], np.highs, [np.nan]], axis=0)))
        else:
            self.s = s

    def isSupport(self, lows, i):
        support = lows[i] < lows[i-1] and lows[i] < lows[i+1] \
            and lows[i+1] < lows[i+2] and lows[i-1] < lows[i-2]

        return support

    def isResistance(self, highs, i):
        resistance = highs[i] > highs[i-1] and highs[i] > highs[i+1] \
            and highs[i+1] > highs[i+2] and highs[i-1] > highs[i-2]

        return resistance

    def find_levels(self, highs, lows):
        levels = []

        for i in range(2, len(lows) - 2):
            if self.isSupport(lows, i):
                l = lows[i]

                # compare against the price component of already stored (index, price) levels
                if not np.any([abs(l - x[1]) < self.s for x in levels]):
                    levels.append((i, l))

            elif self.isResistance(highs, i):
                l = highs[i]

                if not np.any([abs(l - x[1]) < self.s for x in levels]):
                    levels.append((i, l))

        return levels


def plot_ohlc_with_support_resistance(bars, s=None):
    highs = np.array(bars['high'])
    lows = np.array(bars['low'])

    finder = Sup_Res_Finder(s=s)
    levels = finder.find_levels(highs, lows)

    fig, ax = plt.subplots()

    # Plot the candlesticks
    ax.plot(bars['time'], highs, color='green', linestyle='-', linewidth=0.8)
    ax.plot(bars['time'], lows, color='red', linestyle='-', linewidth=0.8)
    ax.fill_between(bars['time'], highs, lows, color='green' if highs[0] > lows[0] else 'red', alpha=0.5)

    # Plot the support and resistance levels
    for level in levels:
        ax.hlines(level[1], level[0] - 0.5, level[0] + 0.5, color='black', linewidth=1)

    ax.set_xlabel('Time')
    ax.set_ylabel('Price')
    ax.set_title('OHLC Chart with Support and Resistance Levels')

    plt.show()


plot_ohlc_with_support_resistance(detail.bars, 0.05)

# print(price_series)
# # Find local maxima and minima using the optimized function.
# maxima_indices = argrelextrema(price_series, np.greater)[0]
# minima_indices = argrelextrema(price_series, np.less)[0]
# print(maxima_indices)
# print(minima_indices)
# # # Find local maxima and minima
# # maxima_indices = argrelextrema(price_series, np.greater)[0]
# # minima_indices = argrelextrema(price_series, np.less)[0]

# Plot the price series with local maxima and minima
# plt.figure(figsize=(10, 6))
# plt.plot(range(len(price_series)), price_series, label='Price Series')
# plt.scatter(maxima_indices, price_series[maxima_indices], color='r', label='Local Maxima', zorder=5)
# plt.scatter(minima_indices, price_series[minima_indices], color='g', label='Local Minima', zorder=5)
# plt.xlabel('Time')
# plt.ylabel('Price')
# plt.title('Price Series with Local Maxima and Minima')
# plt.legend()
# plt.show()

# # Print the indices of local maxima and minima
# print("Local Maxima Indices:", maxima_indices)
# print("Local Minima Indices:", minima_indices)
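# A minimal check of the 5-bar "fractal" rule used by isSupport / isResistance above, on
# toy arrays (values chosen by hand just to trigger one support and one resistance):
import numpy as np

toy_highs = np.array([10.0, 10.2, 10.6, 10.3, 10.1, 10.4, 10.5])
toy_lows  = np.array([ 9.8,  9.6,  9.4,  9.5,  9.7,  9.6,  9.9])

def is_support(lows, i):
    # bar i is a local low with lows increasing away from it on both sides
    return lows[i] < lows[i-1] < lows[i-2] and lows[i] < lows[i+1] < lows[i+2]

def is_resistance(highs, i):
    return highs[i] > highs[i-1] > highs[i-2] and highs[i] > highs[i+1] > highs[i+2]

for i in range(2, len(toy_lows) - 2):
    if is_support(toy_lows, i):
        print("support at bar", i, toy_lows[i])        # -> bar 2, 9.4
    if is_resistance(toy_highs, i):
        print("resistance at bar", i, toy_highs[i])    # -> bar 2, 10.6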
23 testy/vectorbt/test.py Normal file
@@ -0,0 +1,23 @@
import vectorbt as vb


class ShortOnCloseBreakoutStrategy:
    def init(self):
        self.last_close = self.data.close[-1]

    def next(self):
        # Enter a short position when the price is below the last day's close
        if self.data.close < self.last_close:
            self.sell()

        # Exit the short position after 10 ticks
        elif self.data.close > self.last_close + 10:
            self.buy()

# Create a backtest object
#backtest = vb.Backtest(ShortOnCloseBreakoutStrategy())

# Load the closing prices for the assets in the portfolio
close = vb.YFData.download('AAPL', start='2023-01-01').get('Close')
print(close)
# Backtest the strategy
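# Note: vectorbt does not provide a Backtest/Strategy class like the sketch above (that
# style resembles backtesting.py); its native interface is signal-based. A rough sketch of
# an equivalent, not in the commit - the 1% exit offset is an arbitrary assumption and
# keyword names can differ between vectorbt versions:
import vectorbt as vbt

close = vbt.YFData.download('AAPL', start='2023-01-01').get('Close')

prev_close = close.shift(1)           # previous bar's close as the breakout reference
entries = close < prev_close          # go short when price breaks below the previous close
exits = close > prev_close * 1.01     # cover when price recovers 1% above it (assumption)

pf = vbt.Portfolio.from_signals(close, entries=entries, exits=exits, direction='shortonly')
print(pf.stats())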
51 testy/volatilitytest.py Normal file
@@ -0,0 +1,51 @@
import numpy as np
import matplotlib.pyplot as plt
from v2realbot.controller.services import get_archived_runner_details_byID
from v2realbot.common.model import RunArchiveDetail

# Generate sample price data
timestamps = np.arange('2023-10-27', '2023-10-28', dtype='datetime64[s]')
price = 100 + np.arange(100) * 0.5

id = "e74b5d35-6552-4dfc-ba59-2eda215af292"

res, val = get_archived_runner_details_byID(id)
if res < 0:
    print(res)

detail = RunArchiveDetail(**val)
# detail.indicators[0]
# converted to numpy arrays so the boolean/array indexing in plot_data below works
price = np.array(detail.bars["vwap"])
timestamps = np.array(detail.bars["time"])

# Calculate the standard deviation of price changes over a specified time interval
def calculate_volatility(price, window):
    volatility = np.zeros_like(price)
    for i in range(window, len(price)):
        volatility[i] = np.std(price[i - window: i])
    return volatility

# Set a threshold for the indicator
threshold = 0.4

# Identify breakout points based on the threshold
def identify_breakouts(volatility, threshold):
    return volatility > threshold

# Plot the price data and the volatility breakout points
def plot_data(timestamps, price, breakout_points):
    plt.figure(figsize=(12, 6))
    plt.plot(timestamps, price, label='Price')
    breakout_timestamps = timestamps[np.where(breakout_points)[0]]
    breakout_prices = price[np.where(breakout_points)[0]]
    plt.scatter(breakout_timestamps, breakout_prices, color='r', label='Volatility Breakout')
    plt.xlabel('Time')
    plt.ylabel('Price')
    plt.title('Intraday Volatility Breakout Indicator')
    plt.legend()
    plt.show()

# Applying the functions
window = 30
volatility = calculate_volatility(price, window)
breakout_points = identify_breakouts(volatility, threshold)
plot_data(timestamps, price, breakout_points)
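# The per-element loop in calculate_volatility can also be written as a vectorized rolling
# standard deviation. A minimal sketch with pandas on synthetic data (not in the commit;
# the quantile-based threshold is an arbitrary alternative to the fixed 0.4 above):
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
prices = pd.Series(100 + np.cumsum(rng.normal(0, 0.2, 500)))

window = 30
volatility = prices.rolling(window).std(ddof=0)   # ddof=0 matches np.std used above

threshold = volatility.quantile(0.9)
breakouts = volatility > threshold
print(int(breakouts.sum()), "breakout bars out of", len(prices))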
@@ -4,12 +4,13 @@ from v2realbot.strategy.base import StrategyState
from v2realbot.strategy.StrategyOrderLimitVykladaciNormalizedMYSELL import StrategyOrderLimitVykladaciNormalizedMYSELL
from v2realbot.enums.enums import RecordType, StartBarAlign, Mode, Account
from v2realbot.utils.utils import zoneNY, print
-from datetime import datetime
+from v2realbot.utils.historicals import get_historical_bars
+from datetime import datetime, timedelta
from rich import print as printanyway
from threading import Event
import os
from traceback import format_exc
+from alpaca.data.timeframe import TimeFrame, TimeFrameUnit
from v2realbot.strategyblocks.newtrade.prescribedtrades import execute_prescribed_trades
from v2realbot.strategyblocks.newtrade.signals import signal_search
from v2realbot.strategyblocks.activetrade.activetrade_hub import manage_active_trade
@@ -124,6 +125,30 @@ def init(state: StrategyState):
    state.ind_mapping = {**local_dict_inds, **local_dict_bars}
    printanyway("IND MAPPING DONE:", state.ind_mapping)

+    # 30-day historical data fill - move it into the base class if it proves useful
+    # -1 is always yesterday in this case
+    # thanks to this the indicators have data 30 days back (so last day close etc. can be computed)
+    # in the future, systematize access to historical data
+    # e.g. historicals.days, state.historical.bars["days"], etc.
+    # for now a single-purpose state.dailyBars
+
+    # LIVE and PAPER - take the current time
+    # BT - take the bt_start time
+    if state.mode in (Mode.LIVE, Mode.PAPER):
+        time_to = datetime.now(tz=zoneNY)
+    else:
+        time_to = state.bt.bp_from
+
+    # TBD: also add hourly data - for computing RSI on the hourly timeframe
+    # get 30 days (time_from must be at least -2 so that yesterday is included)
+    time_from = time_to - timedelta(days=40)
+    time_to = time_to - timedelta(days=1)
+    state.dailyBars = get_historical_bars(state.symbol, time_from, time_to, TimeFrame.Day)
+    #printanyway("daily bars FILLED", state.dailyBars)
+    # for now stored in extData - for instant indicators and the GUI
+    state.extData["dailyBars"] = state.dailyBars
+
def main():
    name = os.path.basename(__file__)
    se = Event()
@@ -277,18 +277,20 @@ def is_runner_running(id: UUID):

def save_history(id: UUID, st: object, runner: Runner, reason: str = None):
-    #try to read the profit from the object
-    try:
-        profit = st.state.profit
-        trade_count = len(st.state.tradeList)
-    except Exception as e:
-        profit = str(e)
+    # #try to read the profit from the object
+    # try:
+    #     profit = st.state.profit
+    #     trade_count = len(st.state.tradeList)
+    # except Exception as e:
+    #     profit = str(e)

-    for i in db.stratins:
-        if str(i.id) == str(id):
-            i.history += "START:"+str(runner.run_started)+"STOP:"+str(runner.run_stopped)+"ACC:"+runner.run_account.value+"M:"+runner.run_mode.value+"PROFIT:"+str(round(profit,2))+ "TradeCNT:"+str(trade_count) + "REASON:" + str(reason)
-            #i.history += str(runner.__dict__)+"<BR>"
-            db.save()
+    #we only write the reason - used only on exceptions
+    if reason is not None:
+        for i in db.stratins:
+            if str(i.id) == str(id):
+                i.history += "\nREASON:" + str(reason)
+                #i.history += str(runner.__dict__)+"<BR>"
+                db.save()

#Capsule to run the thread in. Needed in order to update db after strat ends for any reason#
def capsule(target: object, db: object, inter_batch_params: dict = None):
@@ -302,7 +304,7 @@ def capsule(target: object, db: object, inter_batch_params: dict = None):
        target.start()

        print("Strategy instance stopped. Update runners")
-        reason = "SHUTDOWN OK"
+        reason = None
    except Exception as e:
        reason = "SHUTDOWN Exception:" + str(e) + format_exc()
        #raise RuntimeError('Exception v runneru POZOR') from e
@@ -324,7 +326,7 @@ def capsule(target: object, db: object, inter_batch_params: dict = None):
            i.run_instance = None
            i.run_pause_ev = None
            i.run_stop_ev = None
-            #store a row into the history (refactor later)
+            #stored only to record the exception reason
            save_history(id=i.strat_id, st=target, runner=i, reason=reason)
            #store in archive header and archive detail
            archive_runner(runner=i, strat=target, inter_batch_params=inter_batch_params)
@@ -453,8 +455,14 @@ def batch_run_manager(id: UUID, runReq: RunRequest, rundays: list[RunDay]):
    #variable shared between the runs of the individual batch days (e.g. daily profit)
    inter_batch_params = dict(batch_profit=0, batch_rel_profit=0)
    note_from_run_request = runReq.note
+    first = None
+    last = None
    for day in rundays:
        cnt += 1
+        if cnt == 1:
+            first = day.start
+        elif cnt == cnt_max:
+            last = day.end
        print("Datum od", day.start)
        print("Datum do", day.end)
        runReq.bt_from = day.start
@@ -468,6 +476,21 @@ def batch_run_manager(id: UUID, runReq: RunRequest, rundays: list[RunDay]):
            break

    print("Batch manager FINISHED")
+    ##TBD: also write this into the batch header so there is an overview - from, to, profit, metrics
+    batch_abs_profit = 0
+    batch_rel_profit = 0
+    try:
+        #print(inter_batch_params)
+        batch_abs_profit = inter_batch_params["batch_profit"]
+        batch_rel_profit = inter_batch_params["batch_rel_profit"]
+    except Exception as e:
+        print("inter batch params problem", inter_batch_params, str(e)+format_exc())
+
+    for i in db.stratins:
+        if str(i.id) == str(id):
+            i.history += "\nBatch: "+str(batch_id)+" "+str(first)+" "+str(last)+" P:"+str(int(batch_abs_profit))+ "R:"+str(round(batch_rel_profit,4))
+            #i.history += str(runner.__dict__)+"<BR>"
+            db.save()


#stratin run
@@ -643,13 +666,14 @@ def populate_metrics_output_directory(strat: StrategyInstance, inter_batch_param

    #fill in the batch sum profit
    if inter_batch_params is not None:
-        res["profit"]["batch_sum_profit"] = inter_batch_params["batch_profit"]
+        res["profit"]["batch_sum_profit"] = int(inter_batch_params["batch_profit"])
        res["profit"]["batch_sum_rel_profit"] = inter_batch_params["batch_rel_profit"]

-    #rel_profit itemized profits
-    res["profit"]["rel_profits"] = strat.state.rel_profit_cum
    #rel_profit averaged
-    res["profit"]["rel_profit_cum"] = float(np.mean(strat.state.rel_profit_cum)) if len(strat.state.rel_profit_cum) > 0 else 0
+    res["profit"]["daily_rel_profit_avg"] = float(np.mean(strat.state.rel_profit_cum)) if len(strat.state.rel_profit_cum) > 0 else 0
+    #rel_profit itemized profits
+    res["profit"]["daily_rel_profit_list"] = strat.state.rel_profit_cum

    #metrics from prescribedTrades, if they exist
    try:
@@ -1117,10 +1141,10 @@ def get_testlists():
# endregion

#WIP - instant indicators
-def preview_indicator_byTOML(id: UUID, indicator: InstantIndicator):
+def preview_indicator_byTOML(id: UUID, indicator: InstantIndicator, save: bool = True):
    try:
        if indicator.name is None:
-            return (-2, "name is required")
+            return (-2, ["name is required"])

        #print("na zacatku", indicator.toml)

@@ -1163,16 +1187,6 @@ def preview_indicator_byTOML(id: UUID, indicator: InstantIndicator):
        if res < 0:
            return (-2, "no archived runner {id}")

-        #TODO - conditional: implement the condition
-        # if value == "conditional":
-        #     conditions = state.vars.indicators[indname]["cp"]["conditions"]
-        #     for condname,condsettings in conditions.items():
-        #         state.vars.indicators[indname]["cp"]["conditions"][condname]["cond_dict"] = get_conditions_from_configuration(action=KW.change_val+"_if", section=condsettings)
-        #         printanyway(f'creating workdict for {condname} value {state.vars.indicators[indname]["cp"]["conditions"][condname]["cond_dict"]}')
-
-        #TODO - support types other than custom?
-
-
        detail = RunArchiveDetail(**val)
        #print("toto jsme si dotahnuli", detail.bars)

@@ -1192,7 +1206,7 @@ def preview_indicator_byTOML(id: UUID, indicator: InstantIndicator):

        ##put the indicator settings into the shape stratvars expects (for dynamic initialization)
        stratvars = AttributeDict(indicators=AttributeDict(**{jmeno:toml_parsed}))
-        print("stratvars", stratvars)
+        #print("stratvars", stratvars)

        state = StrategyState(name="XX", symbol = "X", stratvars = AttributeDict(**stratvars), interface=interface)

@@ -1203,6 +1217,10 @@ def preview_indicator_byTOML(id: UUID, indicator: InstantIndicator):
        state.bars = new_bars
        state.indicators = new_inds

+        #add dailyBars from extData
+        if hasattr(detail, "ext_data") and "dailyBars" in detail.ext_data:
+            state.dailyBars = detail.ext_data["dailyBars"]
+            #print("daily bars added to state.dailyBars", state.dailyBars)
        print("delka",len(detail.bars["close"]))

        #intitialize indicator mapping - in order to use eval in expression
@@ -1230,7 +1248,8 @@ def preview_indicator_byTOML(id: UUID, indicator: InstantIndicator):
            for key in detail.indicators[0]:
                state.indicators[key].append(detail.indicators[0][key][i])

-            new_inds[indicator.name].append(0)
+            #initialize the new indicator with 0
+            state.indicators[indicator.name].append(0)

            try:
                populate_dynamic_indicators(new_data, state)
@@ -1241,8 +1260,7 @@ def preview_indicator_byTOML(id: UUID, indicator: InstantIndicator):
                print(str(e) + format_exc())

-        print("Done - static", f"delka {len(state.indicators[indicator.name])}", state.indicators[indicator.name])
-        #print("Done", f"delka {len(new_inds[indicator.name])}", new_inds[indicator.name])
+        #print("Done", state.indicators[indicator.name])

        new_inds[indicator.name] = state.indicators[indicator.name]
@@ -59,6 +59,7 @@ def zlema(data: Any, period: int = 50, use_series=False) -> Any:
    return pd.Series(zlema) if use_series else zlema

+
def sma(data: Any, period: int = 50, use_series=False) -> Any:
    """
    Finding the moving average of a dataset
@@ -80,8 +81,14 @@ def hma(data: Any, period: int = 50, use_series=False) -> Any:
    hma = ti.hma(data, period)
    return pd.Series(hma) if use_series else hma

+def linreg(data: Any, period: int = 50, use_series=False) -> Any:
+    if check_series(data):
+        use_series = True
+        data = convert_to_numpy(data)
+    linreg = ti.linreg(data, period)
+    return pd.Series(linreg) if use_series else linreg
+
-def kaufman_adaptive_ma(data: Any, period: int = 50, use_series=False) -> Any:
+def kama(data: Any, period: int = 50, use_series=False) -> Any:
    if check_series(data):
        use_series = True
        data = convert_to_numpy(data)
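# The new linreg indicator delegates to tulipy's ti.linreg (a least-squares regression line
# fitted over each trailing window), the same call the diff above uses. A tiny usage sketch,
# not in the commit - and, as an assumption, tulipy outputs are typically shorter than the
# input by period - 1 bars:
import numpy as np
import tulipy as ti

close = np.array([10.0, 10.2, 10.1, 10.4, 10.6, 10.5, 10.8, 11.0])
reg = ti.linreg(close, 5)
print(len(close), len(reg), reg)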
@@ -369,6 +369,11 @@ class TradeAggregator2Queue(TradeAggregator):
        self.queue = queue
        self.symbol = symbol

+    #accepts a loaded queue and sends it to the given output
+    async def ingest_cached(self, cached_queue):
+        for element in cached_queue:
+            self.queue.put(element)
+
    async def ingest_trade(self, data):
        #print("ingest ve threadu:",current_thread().name)
        res = await super().ingest_trade(data, self.symbol)
@@ -400,6 +405,11 @@ class TradeAggregator2List(TradeAggregator):
        # if os.path.exists(self.debugfile):
        #     os.remove(self.debugfile)

+    #accepts a loaded queue and sends it to the given output
+    async def ingest_cached(self, cached_queue):
+        for element in cached_queue:
+            self.btdata.append((element['t'],element['p']))
+
    async def ingest_trade(self, data):
        #print("ted vstoupil do tradeagg2list ingestu")
        res1 = await super().ingest_trade(data, self.symbol)
63 v2realbot/loader/cacher.py Normal file
@@ -0,0 +1,63 @@
from v2realbot.loader.aggregator import TradeAggregator, TradeAggregator2List, TradeAggregator2Queue
from alpaca.trading.requests import GetCalendarRequest
from alpaca.trading.client import TradingClient
from alpaca.data.live import StockDataStream
from v2realbot.config import ACCOUNT1_PAPER_API_KEY, ACCOUNT1_PAPER_SECRET_KEY, DATA_DIR, OFFLINE_MODE
from alpaca.data.enums import DataFeed
from alpaca.data.historical import StockHistoricalDataClient
from alpaca.data.requests import StockLatestQuoteRequest, StockBarsRequest, StockTradesRequest
from threading import Thread, current_thread
from v2realbot.utils.utils import parse_alpaca_timestamp, ltp, zoneNY, print
from v2realbot.utils.tlog import tlog
from datetime import datetime, timedelta, date
from threading import Thread
import asyncio
from msgpack.ext import Timestamp
from msgpack import packb
from pandas import to_datetime
import pickle
import os
from rich import print
import queue
from alpaca.trading.models import Calendar
from v2realbot.enums.enums import RecordType, StartBarAlign
from datetime import datetime, timedelta
from v2realbot.utils.utils import parse_alpaca_timestamp, ltp, Queue,is_open_hours,zoneNY
from queue import Queue
from rich import print
from v2realbot.enums.enums import Mode
import threading


class Cacher:
    def __init__(self,
                 rectype: RecordType = RecordType.BAR,
                 timeframe: int = 5,
                 minsize: int = 100,
                 update_ltp: bool = False,
                 align: StartBarAlign = StartBarAlign.ROUND,
                 mintick: int = 0,
                 exthours: bool = False):
        pass  # WIP: no body yet in this commit


# input is a list of aggregators - usually 1 for the queue, 1 for the backtest engine
def get_cached_agg_data(agg_list, open, close):
    # NOTE: the `open` parameter shadows the builtin open() used below
    file_path = DATA_DIR + "/cache/" + populate_file_name(agg_list[0], open, close)

    if os.path.exists(file_path):
        ## the daily file exists
        # load it from the file
        # if start_time < trade < end_time
        # send it to the queue
        # otherwise pass
        with open(file_path, 'rb') as fp:
            agg_data = pickle.load(fp)
            print("Loading AGGREGATED DATA from CACHE", file_path)

        return agg_data


def store_cache_agg_data(aggregator, open, close):
    pass
    # saves the data to a file


def populate_file_name(aggregator, open, close):
    aggregated_file = aggregator.symbol + '-' + str(aggregator.rectype) + "-" + str(aggregator.timeframe) + "-" + str(aggregator.minsize) + "-" + str(aggregator.align) + str(aggregator.mintick) + str(aggregator.exthours) + '-' + str(int(open.timestamp())) + '-' + str(int(close.timestamp())) + '.cache'
    return aggregated_file
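# A minimal corrected sketch of the two helpers above (not in the commit): every attribute
# is formatted via an f-string so int/enum fields concatenate safely, and the window
# parameters are renamed so the builtin open() is not shadowed; attribute names are taken
# from the aggregator/Cacher fields above:
import os
import pickle

def build_cache_file_name(agg, window_open, window_close) -> str:
    return (
        f"{agg.symbol}-{agg.rectype}-{agg.timeframe}-{agg.minsize}-"
        f"{agg.align}{agg.mintick}{agg.exthours}-"
        f"{int(window_open.timestamp())}-{int(window_close.timestamp())}.cache"
    )

def load_cached_agg_data(data_dir, agg, window_open, window_close):
    file_path = os.path.join(data_dir, "cache", build_cache_file_name(agg, window_open, window_close))
    if not os.path.exists(file_path):
        return None
    with open(file_path, "rb") as fp:   # builtin open, no shadowing
        data = pickle.load(fp)
    print("Loading AGGREGATED DATA from CACHE", file_path)
    return data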
@@ -1,4 +1,5 @@
from v2realbot.loader.aggregator import TradeAggregator, TradeAggregator2List, TradeAggregator2Queue
+#from v2realbot.loader.cacher import get_cached_agg_data
from alpaca.trading.requests import GetCalendarRequest
from alpaca.trading.client import TradingClient
from alpaca.data.live import StockDataStream
@@ -132,6 +133,21 @@ class Trade_Offline_Streamer(Thread):
                print("time_to je pred zacatkem marketu. Vynechavame tento den.")
                continue

+            #check if we have aggregated data in the cache
+
+            #agg data found, load it from the file
+            #and call the cacher
+            #trade daily file
+
+            #input is the array of aggregators and the open/close of the given day
+            #cached_aggregated_data = get_cached_agg_data(self.to_run[symbpole[0]], day.open, day.close)
+
+            # if cached_aggregated_data is not None:
+            #     #send the aggregated data to the aggregators' ingest cache for forwarding to the individual channels
+
+
+            #trade daily file
            daily_file = str(symbpole[0]) + '-' + str(int(day.open.timestamp())) + '-' + str(int(day.close.timestamp())) + '.cache'
            print(daily_file)
            file_path = DATA_DIR + "/"+daily_file
@ -1,388 +1,389 @@
|
|||||||
from sklearn.preprocessing import StandardScaler
|
# from sklearn.preprocessing import StandardScaler
|
||||||
from keras.models import Sequential
|
# # from keras.models import Sequential
|
||||||
from v2realbot.enums.enums import PredOutput, Source, TargetTRFM
|
# from v2realbot.enums.enums import PredOutput, Source, TargetTRFM
|
||||||
from v2realbot.config import DATA_DIR
|
# from v2realbot.config import DATA_DIR
|
||||||
from joblib import dump
|
# from joblib import dump
|
||||||
import v2realbot.ml.mlutils as mu
|
# # import v2realbot.ml.mlutils as mu
|
||||||
from v2realbot.utils.utils import slice_dict_lists
|
# from v2realbot.utils.utils import slice_dict_lists
|
||||||
import numpy as np
|
# import numpy as np
|
||||||
from copy import deepcopy
|
# from copy import deepcopy
|
||||||
import v2realbot.controller.services as cs
|
# import v2realbot.controller.services as cs
|
||||||
#Basic classes for machine learning
|
# #Basic classes for machine learning
|
||||||
#drzi model a jeho zakladni nastaveni
|
# #drzi model a jeho zakladni nastaveni
|
||||||
|
|
||||||
#Sample Data
|
# #Sample Data
|
||||||
sample_bars = {
|
# sample_bars = {
|
||||||
'time': [1, 2, 3, 4, 5,6,7,8,9,10,11,12,13,14,15],
|
# 'time': [1, 2, 3, 4, 5,6,7,8,9,10,11,12,13,14,15],
|
||||||
'high': [10, 11, 12, 13, 14,10, 11, 12, 13, 14,10, 11, 12, 13, 14],
|
# 'high': [10, 11, 12, 13, 14,10, 11, 12, 13, 14,10, 11, 12, 13, 14],
|
||||||
'low': [8, 9, 7, 6, 8,8, 9, 7, 6, 8,8, 9, 7, 6, 8],
|
# 'low': [8, 9, 7, 6, 8,8, 9, 7, 6, 8,8, 9, 7, 6, 8],
|
||||||
'volume': [1000, 1200, 900, 1100, 1300,1000, 1200, 900, 1100, 1300,1000, 1200, 900, 1100, 1300],
|
# 'volume': [1000, 1200, 900, 1100, 1300,1000, 1200, 900, 1100, 1300,1000, 1200, 900, 1100, 1300],
|
||||||
'close': [9, 10, 11, 12, 13,9, 10, 11, 12, 13,9, 10, 11, 12, 13],
|
# 'close': [9, 10, 11, 12, 13,9, 10, 11, 12, 13,9, 10, 11, 12, 13],
|
||||||
'open': [9, 10, 8, 8, 8,9, 10, 8, 8, 8,9, 10, 8, 8, 8],
|
# 'open': [9, 10, 8, 8, 8,9, 10, 8, 8, 8,9, 10, 8, 8, 8],
|
||||||
'resolution': [1, 1, 1, 1, 1,1, 1, 1, 1, 1,1, 1, 1, 1, 1]
|
# 'resolution': [1, 1, 1, 1, 1,1, 1, 1, 1, 1,1, 1, 1, 1, 1]
|
||||||
}
|
# }
|
||||||
|
|
||||||
sample_indicators = {
|
# sample_indicators = {
|
||||||
'time': [1, 2, 3, 4, 5,6,7,8,9,10,11,12,13,14,15],
|
# 'time': [1, 2, 3, 4, 5,6,7,8,9,10,11,12,13,14,15],
|
||||||
'fastslope': [90, 95, 100, 110, 115,90, 95, 100, 110, 115,90, 95, 100, 110, 115],
|
# 'fastslope': [90, 95, 100, 110, 115,90, 95, 100, 110, 115,90, 95, 100, 110, 115],
|
||||||
'fsdelta': [90, 95, 100, 110, 115,90, 95, 100, 110, 115,90, 95, 100, 110, 115],
|
# 'fsdelta': [90, 95, 100, 110, 115,90, 95, 100, 110, 115,90, 95, 100, 110, 115],
|
||||||
'fastslope2': [90, 95, 100, 110, 115,90, 95, 100, 110, 115,90, 95, 100, 110, 115],
|
# 'fastslope2': [90, 95, 100, 110, 115,90, 95, 100, 110, 115,90, 95, 100, 110, 115],
|
||||||
'ema': [1000, 1200, 900, 1100, 1300,1000, 1200, 900, 1100, 1300,1000, 1200, 900, 1100, 1300]
|
# 'ema': [1000, 1200, 900, 1100, 1300,1000, 1200, 900, 1100, 1300,1000, 1200, 900, 1100, 1300]
|
||||||
}
|
# }
|
||||||
|
|
||||||
#Trida, která drzi instanci ML modelu a jeho konfigurace
|
# #Trida, která drzi instanci ML modelu a jeho konfigurace
|
||||||
#take se pouziva jako nastroj na pripravu dat pro train a predikci
|
# #take se pouziva jako nastroj na pripravu dat pro train a predikci
|
||||||
#pozor samotna data trida neobsahuje, jen konfiguraci a pak samotny model
|
# #pozor samotna data trida neobsahuje, jen konfiguraci a pak samotny model
|
||||||
class ModelML:
|
# class ModelML:
|
||||||
def __init__(self, name: str,
|
# def __init__(self, name: str,
|
||||||
pred_output: PredOutput,
|
# pred_output: PredOutput,
|
||||||
bar_features: list,
|
# bar_features: list,
|
||||||
ind_features: list,
|
# ind_features: list,
|
||||||
input_sequences: int,
|
# input_sequences: int,
|
||||||
target: str,
|
# target: str,
|
||||||
target_reference: str,
|
# target_reference: str,
|
||||||
train_target_steps: int, #train
|
# train_target_steps: int, #train
|
||||||
train_target_transformation: TargetTRFM, #train
|
# train_target_transformation: TargetTRFM, #train
|
||||||
train_epochs: int, #train
|
# train_epochs: int, #train
|
||||||
train_runner_ids: list = None, #train
|
# train_runner_ids: list = None, #train
|
||||||
train_batch_id: str = None, #train
|
# train_batch_id: str = None, #train
|
||||||
version: str = "1",
|
# version: str = "1",
|
||||||
note : str = None,
|
# note : str = None,
|
||||||
use_bars: bool = True,
|
# use_bars: bool = True,
|
||||||
train_remove_cross_sequences: bool = False, #train
|
# train_remove_cross_sequences: bool = False, #train
|
||||||
#standardne StandardScaler
|
# #standardne StandardScaler
|
||||||
scalerX: StandardScaler = StandardScaler(),
|
# scalerX: StandardScaler = StandardScaler(),
|
||||||
scalerY: StandardScaler = StandardScaler(),
|
# scalerY: StandardScaler = StandardScaler(),
|
||||||
model: Sequential = Sequential()) -> None:
|
# model, #Sequential = Sequential()
|
||||||
|
# )-> None:
|
||||||
|
|
||||||
self.name = name
|
# self.name = name
|
||||||
self.version = version
|
# self.version = version
|
||||||
self.note = note
|
# self.note = note
|
||||||
self.pred_output: PredOutput = pred_output
|
# self.pred_output: PredOutput = pred_output
|
||||||
#model muze byt take bez barů, tzn. jen indikatory
|
# #model muze byt take bez barů, tzn. jen indikatory
|
||||||
self.use_bars = use_bars
|
# self.use_bars = use_bars
|
||||||
#zajistime poradi
|
# #zajistime poradi
|
||||||
bar_features.sort()
|
# bar_features.sort()
|
||||||
ind_features.sort()
|
# ind_features.sort()
|
||||||
self.bar_features = bar_features
|
# self.bar_features = bar_features
|
||||||
self.ind_features = ind_features
|
# self.ind_features = ind_features
|
||||||
if (train_runner_ids is None or len(train_runner_ids) == 0) and train_batch_id is None:
|
# if (train_runner_ids is None or len(train_runner_ids) == 0) and train_batch_id is None:
|
||||||
raise Exception("train_runner_ids nebo train_batch_id musi byt vyplnene")
|
# raise Exception("train_runner_ids nebo train_batch_id musi byt vyplnene")
|
||||||
self.train_runner_ids = train_runner_ids
|
# self.train_runner_ids = train_runner_ids
|
||||||
self.train_batch_id = train_batch_id
|
# self.train_batch_id = train_batch_id
|
||||||
#target cílový sloupec, který je používám přímo nebo transformován na binary
|
# #target cílový sloupec, který je používám přímo nebo transformován na binary
|
||||||
self.target = target
|
# self.target = target
|
||||||
self.target_reference = target_reference
|
# self.target_reference = target_reference
|
||||||
self.train_target_steps = train_target_steps
|
# self.train_target_steps = train_target_steps
|
||||||
self.train_target_transformation = train_target_transformation
|
# self.train_target_transformation = train_target_transformation
|
||||||
self.input_sequences = input_sequences
|
# self.input_sequences = input_sequences
|
||||||
self.train_epochs = train_epochs
|
# self.train_epochs = train_epochs
|
||||||
#keep cross sequences between runners
|
# #keep cross sequences between runners
|
||||||
self.train_remove_cross_sequences = train_remove_cross_sequences
|
# self.train_remove_cross_sequences = train_remove_cross_sequences
|
||||||
self.scalerX = scalerX
|
# self.scalerX = scalerX
|
||||||
self.scalerY = scalerY
|
# self.scalerY = scalerY
|
||||||
self.model = model
|
# self.model = model
|
||||||
|
|
||||||
def save(self):
|
# def save(self):
|
||||||
filename = mu.get_full_filename(self.name,self.version)
|
# filename = mu.get_full_filename(self.name,self.version)
|
||||||
dump(self, filename)
|
# dump(self, filename)
|
||||||
print(f"model {self.name} save")
|
# print(f"model {self.name} save")
|
||||||
|
|
||||||
#create X data with features
|
# #create X data with features
|
||||||
def column_stack_source(self, bars, indicators, verbose = 1) -> np.array:
|
# def column_stack_source(self, bars, indicators, verbose = 1) -> np.array:
|
||||||
#create SOURCE DATA with features
|
# #create SOURCE DATA with features
|
||||||
# bars and indicators dictionary and features as input
|
# # bars and indicators dictionary and features as input
|
||||||
poradi_sloupcu_inds = [feature for feature in self.ind_features if feature in indicators]
|
# poradi_sloupcu_inds = [feature for feature in self.ind_features if feature in indicators]
|
||||||
indicator_data = np.column_stack([indicators[feature] for feature in self.ind_features if feature in indicators])
|
# indicator_data = np.column_stack([indicators[feature] for feature in self.ind_features if feature in indicators])
|
||||||
|
|
||||||
if len(bars)>0:
|
# if len(bars)>0:
|
||||||
bar_data = np.column_stack([bars[feature] for feature in self.bar_features if feature in bars])
|
# bar_data = np.column_stack([bars[feature] for feature in self.bar_features if feature in bars])
|
||||||
poradi_sloupcu_bars = [feature for feature in self.bar_features if feature in bars]
|
# poradi_sloupcu_bars = [feature for feature in self.bar_features if feature in bars]
|
||||||
if verbose == 1:
|
# if verbose == 1:
|
||||||
print("poradi sloupce v source_data", str(poradi_sloupcu_bars + poradi_sloupcu_inds))
|
# print("poradi sloupce v source_data", str(poradi_sloupcu_bars + poradi_sloupcu_inds))
|
||||||
combined_day_data = np.column_stack([bar_data,indicator_data])
|
# combined_day_data = np.column_stack([bar_data,indicator_data])
|
||||||
else:
|
# else:
|
||||||
combined_day_data = indicator_data
|
# combined_day_data = indicator_data
|
||||||
if verbose == 1:
|
# if verbose == 1:
|
||||||
print("poradi sloupce v source_data", str(poradi_sloupcu_inds))
|
# print("poradi sloupce v source_data", str(poradi_sloupcu_inds))
|
||||||
return combined_day_data
|
# return combined_day_data
|
||||||
|
|
||||||
#create TARGET(Y) data
|
# #create TARGET(Y) data
|
||||||
def column_stack_target(self, bars, indicators) -> np.array:
|
# def column_stack_target(self, bars, indicators) -> np.array:
|
||||||
target_base = []
|
# target_base = []
|
||||||
target_reference = []
|
# target_reference = []
|
||||||
try:
|
# try:
|
||||||
try:
|
# try:
|
||||||
target_base = bars[self.target]
|
# target_base = bars[self.target]
|
||||||
except KeyError:
|
# except KeyError:
|
||||||
target_base = indicators[self.target]
|
# target_base = indicators[self.target]
|
||||||
try:
|
# try:
|
||||||
target_reference = bars[self.target_reference]
|
# target_reference = bars[self.target_reference]
|
||||||
except KeyError:
|
# except KeyError:
|
||||||
target_reference = indicators[self.target_reference]
|
# target_reference = indicators[self.target_reference]
|
||||||
except KeyError:
|
# except KeyError:
|
||||||
pass
|
# pass
|
||||||
target_day_data = np.column_stack([target_base, target_reference])
|
# target_day_data = np.column_stack([target_base, target_reference])
|
||||||
return target_day_data
|
# return target_day_data
|
||||||
|
|
||||||
def load_runners_as_list(self, runner_id_list = None, batch_id = None):
    """Loads all runners data (bars, indicators) for given runners into list of dicts.

    List of runners/train_batch_id may be provided, or self.train_runner_ids/train_batch_id is taken instead.

    Returns:
        tuple (barslist, indicatorslist,) - lists with dictionaries for each runner
    """
    if runner_id_list is not None:
        runner_ids = runner_id_list
        print("loading runners for ",str(runner_id_list))
    elif batch_id is not None:
        print("Loading runners for train_batch_id:", batch_id)
        res, runner_ids = cs.get_archived_runnerslist_byBatchID(batch_id)
    elif self.train_batch_id is not None:
        print("Loading runners for TRAINING BATCH self.train_batch_id:", self.train_batch_id)
        res, runner_ids = cs.get_archived_runnerslist_byBatchID(self.train_batch_id)
    #pripadne bereme z listu runneru
    else:
        runner_ids = self.train_runner_ids
        print("loading runners for TRAINING runners ",str(self.train_runner_ids))

    barslist = []
    indicatorslist = []
    ind_keys = None
    for runner_id in runner_ids:
        bars, indicators = mu.load_runner(runner_id)
        print(f"runner:{runner_id}")
        if self.use_bars:
            barslist.append(bars)
            print(f"bars keys {len(bars)} lng {len(bars[self.bar_features[0]])}")
        indicatorslist.append(indicators)
        print(f"indi keys {len(indicators)} lng {len(indicators[self.ind_features[0]])}")
        if ind_keys is not None and ind_keys != len(indicators):
            raise Exception("V runnerech musi byt stejny pocet indikatoru")
        else:
            ind_keys = len(indicators)

    return barslist, indicatorslist

#toto nejspis rozdelit na TRAIN mod (kdy ma smysl si brat nataveni napr. remove cross)
def create_sequences(self, combined_data, target_data = None, remove_cross_sequences: bool = False, rows_in_day = None):
    """Creates sequences of given length seq and optionally target N steps in the future.

    Returns X(source) a Y(transformed target) - vrací take Y_untransformed - napr. referencni target column pro zobrazeni v grafu (napr. cenu)

    Volby pro transformaci targetu:
    - KEEPVAL (keep value as is)
    - KEEPVAL_MOVE(keep value, move target N steps in the future)

    další na zámysl (nejspíš ale data budu připravovat ve stratu a využívat jen KEEPy nahoře)
    - BINARY_prefix - sloupec založený na podmínce, výsledek je 0,1
    - BINARY_TREND RISING - podmínka založena, že v target columnu stoupají/klesají po target N steps
      (podvarianty BINARY TREND RISING(0-1), FALLING(0-1), BOTH(-1 - ))
    - BINARY_READY - předpřipravený sloupec(vytvořený ve strategii jako indikator), stačí jen posunout o target step
    - BINARY_READY_POSUNUTY - předpřipraveny sloupec (již posunutýo o target M) - stačí brát as is

    Args:
        combined_data: A list of combined data.
        target_data: A list of target data (0-target,1-target ref.column)
        remove_cross_sequences: If to remove crossday sequences
        rows_in_day: helper dict to remove crossday sequences
        return_untr: whether to return untransformed reference column

    Returns:
        A list of X sequences and a list of y sequences.
    """

    if remove_cross_sequences is True and rows_in_day is None:
        raise Exception("To remove crossday sequences, rows_in_day param required.")

    if target_data is not None and len(target_data) > 0:
        target_data_untr = target_data[:,1]
        target_data = target_data[:,0]
    else:
        target_data_untr = []
        target_data = []

    X_train = []
    y_train = []
    y_untr = []
    #comb data shape (4073, 13)
    #target shape (4073, 1)
    print("Start Sequencing")
    #range sekvence podle toho jestli je pozadovan MOVE nebo NE
    if self.train_target_transformation == TargetTRFM.KEEPVAL_MOVE:
        right_offset = self.input_sequences + self.train_target_steps
    else:
        right_offset = self.input_sequences
    for i in range(len(combined_data) - right_offset):

        #take neresime cross sekvence kdyz neni vyplneni target nebo neni vyplnena rowsinaday
        if remove_cross_sequences is True and not self.is_same_day(i, i + right_offset, rows_in_day):
            print(f"sekvence vyrazena. NEW Zacatek {combined_data[i, 0]} konec {combined_data[i + right_offset, 0]}")
            continue

        #pridame sekvenci
        X_train.append(combined_data[i:i + self.input_sequences])

        #target hodnotu bude ponecha (na radku mame jiz cilovy target)
        #nebo vezme hodnotu z N(train_target_steps) baru vpredu a da jako target k radku
        #je rizeno nastavenim right_offset vyse
        if target_data is not None and len(target_data) > 0:
            y_train.append(target_data[i + right_offset])

        #udela binary transformaci targetu
        # elif self.target_transformation == TargetTRFM.BINARY_TREND_UP:
        #     #mini loop od 0 do počtu target steps - zda jsou successively rising
        #     #radeji budu resit vizualne conditional indikatorem pri priprave dat
        #     rising = False
        #     for step in range(0,self.train_target_steps):
        #         if target_data[i + self.input_sequences + step] < target_data[i + self.input_sequences + step + 1]:
        #             rising = True
        #         else:
        #             rising = False
        #             break
        #     y_train.append([1] if rising else [0])
        #     #tato zakomentovana varianta porovnava jen cenu ted a cenu na target baru
        #     #y_train.append([1] if target_data[i + self.input_sequences] < target_data[i + self.input_sequences + self.train_target_steps] else [0])
        if target_data is not None and len(target_data) > 0:
            y_untr.append(target_data_untr[i + self.input_sequences])
    return np.array(X_train), np.array(y_train), np.array(y_untr)
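The index arithmetic in create_sequences is easy to misread, so here is a minimal, self-contained sketch of how the X windows and Y targets line up in the KEEPVAL_MOVE case. The window length and target step below are invented illustration values, not settings taken from the model configuration.

import numpy as np

# Illustrative settings only (not real model config values)
input_sequences = 3      # length of each X window
train_target_steps = 2   # KEEPVAL_MOVE: target taken this many bars further out
right_offset = input_sequences + train_target_steps

data = np.arange(10).reshape(-1, 1)   # 10 rows, 1 feature
target = np.arange(10) * 10           # target column, one value per row

X, y = [], []
for i in range(len(data) - right_offset):
    X.append(data[i:i + input_sequences])   # rows i .. i+2
    y.append(target[i + right_offset])      # value right_offset rows after the window start

# First window covers rows 0-2 and is paired with the target from row 5.
print(np.array(X).shape, np.array(y))       # (5, 3, 1) [50 60 70 80 90]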
def is_same_day(self, idx_start, idx_end, rows_in_day):
    """Helper for sequencing enables to recognize if the start/end index are from the same day.

    Used for sequences to remove cross runner(day) sequences.

    Args:
        idx_start: Start index
        idx_end: End index
        rows_in_day: 1D array containing number of rows(bars,inds) for each day.
            Cumsumed defines edges where each day ends. [10,30,60]

    Returns:
        A boolean

    refactor to vectors if possible
    i_b, i_e
    podm_pole = i_b<pole and i_s >= pole
    [10,30,60]
    """
    for i in rows_in_day:
        #jde o polozku na pomezi - vyhazujeme
        if idx_start < i and idx_end >= i:
            return False
        if idx_start < i and idx_end < i:
            return True
    return None

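The docstring above already notes "refactor to vectors if possible". One possible vectorized variant of the same check, built on the same cumsum edge array, is sketched below; this is only an illustration of the idea (np.searchsorted against the day edges), not code that exists in the repository.

import numpy as np

def is_same_day_vectorized(idx_start, idx_end, day_edges):
    # day_edges is the cumsummed rows-per-day array, e.g. np.array([10, 30, 60]).
    # An index belongs to the day given by the number of edges that are <= index,
    # which is exactly what searchsorted(..., side="right") returns.
    return np.searchsorted(day_edges, idx_start, side="right") == \
           np.searchsorted(day_edges, idx_end, side="right")

edges = np.array([10, 30, 60])
print(is_same_day_vectorized(8, 9, edges))    # True  - both indices in the first day
print(is_same_day_vectorized(8, 12, edges))   # False - the pair crosses the first edge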
#vytvori X a Y data z nastaveni self
#pro vybrane runnery stahne data, vybere sloupce dle faature a target
#a vrátí jako sloupce v numpy poli
#zaroven vraci i rows_in_day pro nasledny sekvencing
def load_data(self, runners_ids: list = None, batch_id: list = None, source: Source = Source.RUNNERS):
    """Service to load data for the model. Can be used for training or for vector prediction.

    If input data are not provided, it will get the value from training model configuration (train_runners_ids, train_batch_id)

    Args:
        runner_ids:
        batch_id:
        source: To load sample data.

    Returns:
        source_data,target_data,rows_in_day
    """
    rows_in_day = []
    indicatorslist = []
    #bud natahneme samply
    if source == Source.SAMPLES:
        if self.use_bars:
            bars = sample_bars
        else:
            bars = {}
        indicators = sample_indicators
        indicatorslist.append(indicators)
    #nebo dotahneme pozadovane runnery
    else:
        #nalodujeme vsechny runnery jako listy (bud z runnerids nebo dle batchid)
        barslist, indicatorslist = self.load_runners_as_list(runner_id_list=runners_ids, batch_id=batch_id)
        #nerozumim
        bl = deepcopy(barslist)
        il = deepcopy(indicatorslist)
        #a zmergujeme jejich data dohromady
        bars = mu.merge_dicts(bl)
        indicators = mu.merge_dicts(il)

    #zaroven vytvarime pomocny list, kde stale drzime pocet radku per day (pro nasledny sekvencing)
    #zatim nad indikatory - v budoucnu zvazit, kdyby jelo neco jen nad barama
    for i, val in enumerate(indicatorslist):
        #pro prvni klic z indikatoru pocteme cnt
        pocet = len(indicatorslist[i][self.ind_features[0]])
        print("pro runner vkladame pocet", pocet)
        rows_in_day.append(pocet)

    rows_in_day = np.array(rows_in_day)
    rows_in_day = np.cumsum(rows_in_day)
    print("celkove pole rows_in_day(cumsum):", rows_in_day)

    print("Data LOADED.")
    print(f"number of indicators {len(indicators)}")
    print(f"number of bar elements{len(bars)}")
    print(f"ind list length {len(indicators['time'])}")
    print(f"bar list length {len(bars['time'])}")

    self.validate_available_features(bars, indicators)

    print("Preparing FEATURES")
    source_data, target_data = self.stack_bars_indicators(bars, indicators)
    return source_data, target_data, rows_in_day

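A quick worked example of the rows_in_day bookkeeping above, with made-up per-runner row counts; it only shows what the cumsum edges end up meaning for the later sequencing step.

import numpy as np

rows_per_runner = [390, 385, 390]           # hypothetical row counts, one runner (day) each
day_edges = np.cumsum(np.array(rows_per_runner))
print(day_edges)                            # [ 390  775 1165]
# Rows 0-389 belong to the first runner, 390-774 to the second, 775-1164 to the third,
# which is the boundary information is_same_day() uses to drop cross-runner sequences.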
def validate_available_features(self, bars, indicators):
    for k in self.bar_features:
        if not k in bars.keys():
            raise Exception(f"Missing bar feature {k}")

    for k in self.ind_features:
        if not k in indicators.keys():
            raise Exception(f"Missing ind feature {k}")

def stack_bars_indicators(self, bars, indicators):
    print("Stacking dicts to numpy")
    print("Source - X")
    source_data = self.column_stack_source(bars, indicators)
    print("shape", np.shape(source_data))
    print("Target - Y", self.target)
    target_data = self.column_stack_target(bars, indicators)
    print("shape", np.shape(target_data))

    return source_data, target_data

#pomocna sluzba, ktera provede vsechny transformace a inverzni scaling a vyleze z nej predikce
#vstupem je standardni format ve strategii (state.bars, state.indicators)
#vystupem je jedna hodnota
def predict(self, bars, indicators) -> float:
    #oriznuti podle seqence - pokud je nastaveno v modelu
    lastNbars = slice_dict_lists(bars, self.input_sequences)
    lastNindicators = slice_dict_lists(indicators, self.input_sequences)
    # print("last5bars", lastNbars)
    # print("last5indicators",lastNindicators)

    combined_live_data = self.column_stack_source(lastNbars, lastNindicators, verbose=0)
    #print("combined_live_data",combined_live_data)
    combined_live_data = self.scalerX.transform(combined_live_data)
    combined_live_data = np.array(combined_live_data)
    #print("last 5 values combined data shape", np.shape(combined_live_data))

    #converts to 3D array
    # 1 number of samples in the array.
    # 2 represents the sequence length.
    # 3 represents the number of features in the data.
    combined_live_data = combined_live_data.reshape((1, self.input_sequences, combined_live_data.shape[1]))

    # Make a prediction
    prediction = self.model(combined_live_data, training=False)
    #prediction = prediction.reshape((1, 1))
    # Convert the prediction back to the original scale
    prediction = self.scalerY.inverse_transform(prediction)
    return float(prediction)

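For reference, a minimal sketch of the shape handling that predict() above relies on; the sequence length and feature count are made-up examples, and the model/scaler objects are omitted.

import numpy as np

input_sequences = 5    # assumed window length
n_features = 13        # assumed number of stacked bar/indicator columns

window = np.random.rand(input_sequences, n_features)      # last N scaled feature rows
batch = window.reshape((1, input_sequences, n_features))  # (samples, timesteps, features)
print(batch.shape)     # (1, 5, 13) - the 3D layout expected by a sequence model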
@ -1,5 +1,5 @@
 import numpy as np
-import v2realbot.controller.services as cs
+# import v2realbot.controller.services as cs
 from joblib import load
 from v2realbot.config import DATA_DIR

@ -17,8 +17,8 @@
 <!-- <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-KK94CHFLLe+nY2dmCWGMq91rCGa5gtU4mk92HdvYe+M/SXH301p5ILy+dN9+nJOZ" crossorigin="anonymous"> -->
 <link href="/static/js/libs/bootstrap.min.css" rel="stylesheet" crossorigin="anonymous">
+<!-- <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.11.1/font/bootstrap-icons.css">
+-->
 <!-- <link rel="stylesheet" href="https://cdn.datatables.net/1.13.4/css/dataTables.bootstrap5.min.css"> -->
 <link rel="stylesheet" href="/static/js/libs/dataTables.bootstrap5.min.css">

@ -156,10 +156,10 @@
 <div id="chart" style="display: None; float: left; "></div>
 <div class="legend" id="legend"></div>
 <div id="msgContainer">
-<div class="clearbutton">
+<div class="clearbutton" id="clrButton">
 <button id="button_clearlog" class="btn btn-outline-success btn-sm" style="display:none;">Clear</button>
 </div>
-<div class="msgContainerInner">
+<div class="msgContainerInner" id="msgContainerInner">
 <div id="lines">
 </div>
 </div>
@ -296,6 +296,7 @@
 <button id="button_refresh" class="refresh btn btn-outline-success btn-sm">Refresh</button>
 <button id="button_compare_arch" class="refresh btn btn-outline-success btn-sm">Compare</button>
 <button id="button_runagain_arch" class="refresh btn btn-outline-success btn-sm">Run Again(r)</button>
+<button id="button_selpage" class="btn btn-outline-success btn-sm">Select all</button>
 <!-- <button id="button_stopall" class="btn btn-outline-success btn-sm">Stop All</button>
 <button id="button_refresh" class="btn btn-outline-success btn-sm">Refresh</button> -->
 </div>
@ -406,6 +407,7 @@
 <button id="button_compare" class="btn btn-outline-success btn-sm">Compare</button>
 <button id="button_run" class="btn btn-outline-success btn-sm">Run Strategy(y)</button>
 <button id="button_refresh" class="refresh btn btn-outline-success btn-sm">Refresh</button>
+<button id="button_filter_strat" class="btn btn-outline-success btn-sm">Filter backtests</button>
 </div>
 <table id="stratinTable" class="table-striped table dataTable" style="width:100%; border-color: #dce1dc;">
 <thead>

@ -109,6 +109,43 @@ function transform_data(data) {
 transformed["bars"] = bars
 transformed["vwap"] = vwap
 transformed["volume"] = volume
+var bars = []
+var volume = []
+var vwap = []
+
+if ((data.ext_data !== null) && (data.ext_data.dailyBars)) {
+    data.ext_data.dailyBars.time.forEach((element, index, array) => {
+        sbars = {};
+        svolume = {};
+        svwap = {};
+
+        sbars["time"] = element;
+        sbars["close"] = data.ext_data.dailyBars.close[index]
+        sbars["open"] = data.ext_data.dailyBars.open[index]
+        sbars["high"] = data.ext_data.dailyBars.high[index]
+        sbars["low"] = data.ext_data.dailyBars.low[index]
+
+        svwap["time"] = element
+        svwap["value"] = data.ext_data.dailyBars.vwap[index]
+
+        svolume["time"] = element
+        svolume["value"] = data.ext_data.dailyBars.volume[index]
+
+        bars.push(sbars)
+        vwap.push(svwap)
+        volume.push(svolume)
+    });
+    transformed["dailyBars"] = {}
+    transformed["dailyBars"]["bars"] = bars
+    transformed["dailyBars"]["vwap"] = vwap
+    transformed["dailyBars"]["volume"] = volume
+    var bars = []
+    var volume = []
+    var vwap = []
+}
+
 //get markers - avgp line for all buys
 var avgp_buy_line = []
@ -561,6 +598,20 @@ function chart_indicators(data, visible, offset) {
 }
 })
 }
+
+indList.sort((a, b) => {
+    const nameA = a.name.toUpperCase(); // ignore upper and lowercase
+    const nameB = b.name.toUpperCase(); // ignore upper and lowercase
+    if (nameA < nameB) {
+        return -1;
+    }
+    if (nameA > nameB) {
+        return 1;
+    }
+    // names must be equal
+    return 0;
+});
 //vwap a volume zatim jen v detailnim zobrazeni
 if (!offset) {
 //display vwap and volume
@ -854,15 +905,26 @@ function chart_archived_run(archRecord, data, oneMinuteBars) {
 //console.log("native", native_resolution)

 //available intervals zatim jen 1m
-var intervals = [data.native_resolution, '1m'];
+var intervals = [data.native_resolution, '1m', '1d'];
+
+var dailyData = null
+if (transformed_data["dailyBars"]) {
+    dailyData = transformed_data["dailyBars"]["bars"]
+}
+//zkusime daily data dat do minuty
+//console.log("daily", dailyData)
+
 nativeData = transformed_data["bars"]
+//console.log("native")

 //get one minute data
 //tbd prepare volume
 //console.log("oneMinuteData",oneMinuteBars)

 data["AllCandleSeriesesData"] = new Map([
 [data.native_resolution, nativeData ],
-["1m", oneMinuteBars ],
+["1m", dailyData?dailyData.concat(oneMinuteBars):oneMinuteBars],
+["1d", dailyData ],
 ]);

 //dame si data do globalni, abychom je mohli pouzivat jinde (trochu prasarna, predelat pak)
@ -970,8 +1032,7 @@ function chart_archived_run(archRecord, data, oneMinuteBars) {
 $("#statusAccount").text(archRecord.account)
 $("#statusIlog").text("Logged:" + archRecord.ilog_save)
 $("#statusStratvars").text(((archRecord.strat_json)?archRecord.strat_json:archRecord.stratvars),null,2)
-$("#statusSettings").text(JSON.stringify(archRecord.metrics,null,2) + " " + JSON.stringify(archRecord.settings,null,2))
+$("#statusSettings").text(JSON.stringify(archRecord.metrics,null,2) + " " + JSON.stringify(archRecord.settings,null,2)+ JSON.stringify(data.ext_data,null,2))

 //TBD other dynamically created indicators

 }

@ -43,6 +43,19 @@ function refresh_arch_and_callback(row, callback) {
 $(document).ready(function () {
 archiveRecords.ajax.reload();

+//button select page
+$('#button_selpage').click(function () {
+    if ($('#button_selpage').hasClass('active')) {
+        $('#button_selpage').removeClass('active');
+        archiveRecords.rows().deselect();
+    }
+    else {
+        $('#button_selpage').addClass('active');
+        archiveRecords.rows( { page: 'current' } ).select();
+    }
+});
+
 //button clear log
 $('#button_clearlog').click(function () {
 $('#lines').empty();
@ -454,7 +467,8 @@ $(document).ready(function () {
 if (record1.bt_to == "") {delete record1["bt_to"];}

 //mazeme, pouze rerunujeme single
-record1["test_batch_id"];
+delete record1["test_batch_id"];
+delete record1["batch_id"];

 const rec = new Object()
 rec.id2 = parseInt(stratData.id2);

@ -28,7 +28,8 @@ $(document).ready(function () {

 obj = new Object()
 obj.runner_id = runner_id
-obj.toml = TOML.parse(ind_editor.getValue())
+// obj.toml = TOML.parse(ind_editor.getValue())
+obj.toml = ""
 obj.name = indname
 jsonString = JSON.stringify(obj);
 //console.log("pred odeslanim",jsonString)

@ -197,6 +197,23 @@ $(document).ready(function () {
 });

+
+//button filter selected strat
+$('#button_filter_strat').click(function () {
+    if ($('#button_filter_strat').hasClass('active')) {
+        $('#button_filter_strat').removeClass('active');
+        archiveRecords.columns().search("").draw();
+        console.log("draw")
+    }
+    else {
+        row = stratinRecords.row('.selected').data();
+        if (row) {
+            $('#button_filter_strat').addClass('active');
+            archiveRecords.column(1).search(row.id).draw();
+            console.log("filteredon",row.id)
+        }
+    }
+});
+
 //button get historical trades
 $('#bt-trade').click(function () {
 event.preventDefault();

@ -1,11 +1,82 @@

 API_KEY = localStorage.getItem("api-key")
 var chart = null
-// var colors = ["#8B1874","#B71375","#B46060","#61c740","#BE6DB7","#898121","#4389d9","#00425A","#B5D5C5","#e61957","#8B1874","#B71375","#B46060","#61c740","#BE6DB7","#898121","#4389d9","#00425A","#B5D5C5","#e61957"]
-// var reset_colors = ["#8B1874","#B71375","#B46060","#61c740","#BE6DB7","#898121","#4389d9","#00425A","#B5D5C5","#e61957","#8B1874","#B71375","#B46060","#61c740","#BE6DB7","#898121","#4389d9","#00425A","#B5D5C5","#e61957"]
-var colors = ["#8B1874","#B71375","#B46060","#61c740","#BE6DB7","#898121","#4389d9","#B5D5C5","#e61957","#7B0E60","#9B2888","#BD38A0","#A30F68","#6E0B50","#CA2183","#E6319B","#A04C54","#643848","#CA7474","#E68D8D","#4F9C34","#3B7128","#73DF4D","#95EF65","#A857A4","#824690","#D087CC","#E2A1DF","#79711B","#635D17","#99912B","#B1A73D","#3779C9","#2B68B3","#5599ED","#77A9F7","#004C67","#00687D","#A1C6B5","#8CC6A5","#C9E6D5","#E4F6EA","#D2144A","#A60F3B","#FA2463","#FF3775"];
-var reset_colors = ["#8B1874","#B71375","#B46060","#61c740","#BE6DB7","#898121","#4389d9","#B5D5C5","#e61957","#7B0E60","#9B2888","#BD38A0","#A30F68","#6E0B50","#CA2183","#E6319B","#A04C54","#643848","#CA7474","#E68D8D","#4F9C34","#3B7128","#73DF4D","#95EF65","#A857A4","#824690","#D087CC","#E2A1DF","#79711B","#635D17","#99912B","#B1A73D","#3779C9","#2B68B3","#5599ED","#77A9F7","#004C67","#00687D","#A1C6B5","#8CC6A5","#C9E6D5","#E4F6EA","#D2144A","#A60F3B","#FA2463","#FF3775"];
+
+//puvodni mene vyrazne barvy
+// var colors = ["#8B1874","#B71375","#B46060","#61c740","#BE6DB7","#898121","#4389d9","#B5D5C5","#e61957","#7B0E60","#9B2888","#BD38A0","#A30F68","#6E0B50","#CA2183","#E6319B","#A04C54","#643848","#CA7474","#E68D8D","#4F9C34","#3B7128","#73DF4D","#95EF65","#A857A4","#824690","#D087CC","#FF3775","#E2A1DF","#79711B","#635D17","#99912B","#B1A73D","#3779C9","#A60F3B","#2B68B3","#5599ED","#77A9F7","#004C67","#00687D","#A1C6B5","#8CC6A5","#C9E6D5","#E4F6EA","#D2144A","#FA2463","#8B1874","#B71375","#B46060","#61c740","#BE6DB7","#898121","#4389d9","#B5D5C5","#e61957","#7B0E60","#9B2888","#BD38A0","#A30F68","#6E0B50","#CA2183","#E6319B","#A04C54","#643848","#CA7474","#E68D8D","#4F9C34","#3B7128","#73DF4D","#95EF65","#A857A4","#824690","#D087CC","#FF3775","#E2A1DF","#79711B","#635D17","#99912B","#B1A73D","#3779C9","#A60F3B","#2B68B3","#5599ED","#77A9F7","#004C67","#00687D","#A1C6B5","#8CC6A5","#C9E6D5","#E4F6EA","#D2144A","#FA2463"];
+// var reset_colors = ["#8B1874","#B71375","#B46060","#61c740","#BE6DB7","#898121","#4389d9","#B5D5C5","#e61957","#7B0E60","#9B2888","#BD38A0","#A30F68","#6E0B50","#CA2183","#E6319B","#A04C54","#643848","#CA7474","#E68D8D","#4F9C34","#3B7128","#73DF4D","#95EF65","#A857A4","#824690","#D087CC","#FF3775","#E2A1DF","#79711B","#635D17","#99912B","#B1A73D","#3779C9","#A60F3B","#2B68B3","#5599ED","#77A9F7","#004C67","#00687D","#A1C6B5","#8CC6A5","#C9E6D5","#E4F6EA","#D2144A","#FA2463","#8B1874","#B71375","#B46060","#61c740","#BE6DB7","#898121","#4389d9","#B5D5C5","#e61957","#7B0E60","#9B2888","#BD38A0","#A30F68","#6E0B50","#CA2183","#E6319B","#A04C54","#643848","#CA7474","#E68D8D","#4F9C34","#3B7128","#73DF4D","#95EF65","#A857A4","#824690","#D087CC","#FF3775","#E2A1DF","#79711B","#635D17","#99912B","#B1A73D","#3779C9","#A60F3B","#2B68B3","#5599ED","#77A9F7","#004C67","#00687D","#A1C6B5","#8CC6A5","#C9E6D5","#E4F6EA","#D2144A","#FA2463"];
+
+// function generateColorPalette(numColors) {
+// const palette = [];
+// let lastColor = null;
+// for (let i = 0; i < numColors; i++) {
+// let color = generateRandomColor();
+// while (isColorDark(color) || areColorsTooSimilar(color, lastColor)) {
+// color = generateRandomColor();
+// }
+// lastColor = color;
+// palette.push(color);
+// }
+// return palette;
+// }
+
+// function generateRandomColor() {
+// const letters = '0123456789ABCDEF';
+// let color = '#';
+// for (let i = 0; i < 6; i++) {
+// color += letters[Math.floor(Math.random() * 16)];
+// }
+// return color;
+// }
+
+// function areColorsTooSimilar(color1, color2) {
+// if (!color1 || !color2) {
+// return false;
+// }
+// // Calculate the color difference
+// const diff = parseInt(color1.substring(1), 16) - parseInt(color2.substring(1), 16);
+// // Define a threshold for color difference (you can adjust this value)
+// const threshold = 500;
+// return Math.abs(diff) < threshold;
+// }
+
+// function isColorDark(color) {
+// const hexColor = color.replace("#", "");
+// const r = parseInt(hexColor.substr(0, 2), 16);
+// const g = parseInt(hexColor.substr(2, 2), 16);
+// const b = parseInt(hexColor.substr(4, 2), 16);
+// const brightness = (r * 299 + g * 587 + b * 114) / 1000;
+// return brightness < 128 || brightness > 140; // You can adjust the threshold for what you consider 'dark'
+// }
+
+// colors = generateColorPalette(255)
+// reset_colors = colors
+
+// console.log(`"${colors.join("\", \"")}"`);
+
+// // pekne vygenrovane pomoci kodu vyse
+var colors = ["#63AA57", "#8F8AB0", "#4CAA4E", "#E24AEE", "#D06AA6", "#7891BA", "#A39A34", "#8A94A2", "#8887A7", "#61BB2F", "#FD569D", "#1EB6E1",
+"#379AC9", "#FD6F2E", "#8C9858", "#39A4A3", "#6D97F4", "#1ECB01", "#FA5B16", "#A6891C", "#48CF10", "#D27B26", "#D56B55", "#FE3AB8", "#E35C51",
+"#EC4FE6", "#E250A3", "#BA618E", "#1BC074", "#C57784", "#888BC5", "#4FA452", "#80885C", "#B97272", "#33BF98", "#B7961D", "#A07284", "#02E54E",
+"#AF7F35", "#F852EF", "#6D955B", "#E0676E", "#F73DEC", "#CE53FD", "#9773D3", "#649E81", "#D062CE", "#AB73E7", "#A4729C", "#E76A07", "#E85CCB",
+"#A16FB1", "#4BB859", "#B25EE2", "#8580CE", "#A275EF", "#AC9245", "#4D988D", "#B672C9", "#4CA96E", "#C9873E", "#5BB147", "#10C783", "#D7647D",
+"#CB893A", "#A586BA", "#28C0A2", "#61A755", "#0EB7C5", "#2DADBC", "#17BB71", "#2BC733", "#2BB890", "#F04EF8", "#699580", "#A88809", "#EB3FF6",
+"#A75ED3", "#859171", "#BB6285", "#81A147", "#AD7CD2", "#65B630", "#C9616C", "#BD5EFA", "#7A9F30", "#2AB6AB", "#FC496A", "#687FC7", "#DB40E7",
+"#07BCE9", "#509F63", "#EC4FDD", "#A079BE", "#C17297", "#E447C2", "#E95AD9", "#9FA01E", "#7E86CF", "#21E316", "#1CABF9", "#17C24F", "#9C9254",
+"#C97994", "#4BA9DA", "#0DD595", "#13BEA8", "#C2855D", "#DF6C13", "#60B370", "#0FC3F6", "#C1830E", "#3AC917", "#0EBBB0", "#CC50B4", "#B768EC",
+"#D47F49", "#B47BC5", "#38ADBD", "#05DC53", "#44CD4E", "#838E65", "#49D70F", "#2DADBE", "#2CB0C9", "#DA703E", "#06B5CA", "#7BAF3E", "#918E79",
+"#2AA5E5", "#C37F5E", "#07B8C9", "#4CBA27", "#E752C6", "#7F93B2", "#4798CD", "#45AA4C", "#4DB666", "#7683A7", "#758685", "#4B9FAD", "#9280FD",
+"#6682DD", "#42ACBE", "#C1609F", "#D850DB", "#649A62", "#54CC22", "#AD81C1", "#BF7A43", "#0FCEA5", "#D06DAF", "#87799B", "#4DA94E", "#2FD654",
+"#07D587", "#21CF0C", "#03CF34", "#42C771", "#D563CD", "#6D9E9A", "#C76C59", "#68B368", "#11BCE5", "#0DCFB3", "#9266D8", "#BF67F6", "#88A04E",
+"#73BE17", "#67B437", "#8586E4", "#9F8749", "#479CA5", "#CC777E", "#4FAF46", "#9D9836", "#918DAF", "#D167B8", "#6F9DA5", "#2BB167", "#16B8BC",
+"#B4861F", "#A08487", "#67B357", "#5CAA5C", "#20CA49", "#D18813", "#15D63F", "#C8618F", "#887E92", "#21C457", "#4EA8CE", "#53BE49", "#5A86D5",
+"#BD7E4E", "#27B0A1", "#33CF42", "#709083", "#38A8DE", "#4CA762", "#1EA4FF", "#DE3EE4", "#70A860", "#39A3C8", "#6BBB39", "#F053F4", "#8C7FB5",
+"#969F21", "#B19841", "#E57148", "#C25DA7", "#6DA979", "#B27D73", "#7F9786", "#41AC99", "#C58848", "#948F9E", "#6BB620", "#81AB3B", "#09DE44",
+"#43A9D2", "#41B0D7", "#20ACAA", "#649FCB", "#CD8345", "#A88669", "#3EA5E7", "#F36A19", "#E06B48", "#8388BD", "#EC6153", "#639082", "#52CA32",
+"#878BAA", "#02BCDB", "#828FD9", "#3DC07F", "#29D46A", "#9C7CC1", "#EB7713", "#F95F6A", "#E25F4C", "#589994", "#D45AB7", "#DE66AB", "#B8715F",
+"#E850F4", "#FB6420", "#C2832C", "#6383C5", "#D57A58", "#EF652C", "#02D71A", "#ED664D", "#60A526"]
+
+var reset_colors = colors.slice()
+
 var indList = []
 var verticalSeries=null
@ -259,7 +330,7 @@ function initialize_chart() {
 //var chartOptions = { width: 1045, height: 600, leftPriceScale: {visible: true}}

 //TMAVY MOD
-var chartOptions = { width: 1080,
+var chartOptions = { width: 1280,
 height: 600,
 leftPriceScale: {visible: true},
 layout: {
@ -483,6 +554,54 @@ function profitLineToggle() {
 }
 }
+
+//togle go wide
+function toggleWide() {
+    width = 2000;
+    const elem = document.getElementById("goWide");
+    const msgContainer = document.getElementById("msgContainer");
+    const msgContainerInner = document.getElementById("msgContainerInner");
+    const clrButton = document.getElementById("clrButton");
+
+    if (elem.classList.contains("switcher-active-item")) {
+        width = 1080;
+        msgContainer.removeAttribute("style");
+        msgContainerInner.removeAttribute("style");
+        clrButton.removeAttribute("style");
+    } else
+    {
+        msgContainer.style.display = "block"
+        msgContainerInner.style.display = "none"
+        clrButton.style.display = "none"
+    }
+    elem.classList.toggle("switcher-active-item");
+
+    if (chart) {
+        chart.applyOptions({ width: width});
+        chart.timeScale().fitContent();
+    }
+}
+
+//togle profit line
+function mrkLineToggle() {
+    vis = true;
+    const elem = document.getElementById("mrkLine");
+    if (elem.classList.contains("switcher-active-item")) {
+        vis = false;
+    }
+    elem.classList.toggle("switcher-active-item");
+    //v ifu kvuli workaroundu
+    if (markersLine) {
+        markersLine.applyOptions({
+        visible: vis });
+    }
+    if (slLine) {
+        slLine.forEach((series, index, array) => {
+            series.applyOptions({
+            visible: vis });
+        })
+    }
+}
+
 //toggle indiktoru
 function onItemClickedToggle(index) {
@ -555,6 +674,46 @@ function populate_indicator_buttons(def) {
 itemEl.addEventListener('click', function(e) {
 profitLineToggle();
 });
+buttonElement.appendChild(itemEl);
+
+//button pro toggle fullscreenu
+var itemEl = document.createElement('button');
+itemEl.innerText = "wide"
+itemEl.classList.add('switcher-item');
+itemEl.style.color = "#99912b"
+itemEl.id = "goWide"
+itemEl.addEventListener('click', function(e) {
+    toggleWide();
+});
+buttonElement.appendChild(itemEl);
+
+// //button pro toggle markeru nakupu/prodeju
+var itemEl = document.createElement('button');
+itemEl.innerText = "mrk"
+itemEl.classList.add('switcher-item');
+itemEl.classList.add('switcher-active-item');
+// if ((activatedButtons) && (!activatedButtons.includes("mrk"))) {
+// }
+// else {
+
+// }
+
+itemEl.style.color = "#99912b"
+itemEl.id = "mrkLine"
+
+// // Create an icon element
+// const iconEl = document.createElement('i');
+// // Set the icon class
+// iconEl.classList.add('bi');
+// iconEl.classList.add('bi-rainbow'); // Replace `icon-name` with the name of the icon you want to use
+// // Append the icon element to the button element
+// itemEl.appendChild(iconEl);
+
+itemEl.addEventListener('click', function(e) {
+    mrkLineToggle();
+});
+
 buttonElement.appendChild(itemEl);

 //create plus button to create new button
@ -290,7 +290,7 @@ html {
 display: inline-block;
 /* overflow: auto; */
 height: 600px;
-width: max-content;
+width: 750px;
 }
 }

@ -364,6 +364,7 @@ pre {

 .headerItem {
 padding-right: 30px;
+color: #33c1aa;
 }

 /* .highlighted {
@ -25,23 +25,45 @@ class StrategyClassicSL(Strategy):
 super().__init__(name, symbol, next, init, account, mode, stratvars, open_rush, close_rush, pe, se, runner_id, ilog_save)

 #zkontroluje zda aktualni profit/loss - nedosahnul limit a pokud ano tak vypne strategii

+##TODO zestručnit a dát pryč opakovací kód
 async def stop_when_max_profit_loss(self):
 self.state.ilog(e="CHECK MAX PROFIT")
 max_sum_profit_to_quit = safe_get(self.state.vars, "max_sum_profit_to_quit", None)
 max_sum_loss_to_quit = safe_get(self.state.vars, "max_sum_loss_to_quit", None)
+
+max_sum_profit_to_quit_rel = safe_get(self.state.vars, "max_sum_profit_to_quit_rel", None)
+max_sum_loss_to_quit_rel = safe_get(self.state.vars, "max_sum_loss_to_quit_rel", None)
+
+if max_sum_profit_to_quit_rel is not None:
+    rel_profit = round(float(np.mean(self.state.rel_profit_cum)),5)
+    if rel_profit >= float(max_sum_profit_to_quit_rel):
+        self.state.ilog(e=f"QUITTING MAX SUM REL PROFIT REACHED {max_sum_profit_to_quit_rel=} {self.state.profit=} {rel_profit=}")
+        self.state.vars.pending = "max_sum_profit_to_quit_rel"
+        send_to_telegram(f"QUITTING MAX SUM REL PROFIT REACHED {max_sum_profit_to_quit_rel=} {self.state.profit=} {rel_profit=}")
+        self.se.set()
+        return True
+if max_sum_loss_to_quit_rel is not None:
+    rel_profit = round(float(np.mean(self.state.rel_profit_cum)),5)
+    if rel_profit < 0 and rel_profit <= float(max_sum_loss_to_quit_rel):
+        self.state.ilog(e=f"QUITTING MAX SUM REL LOSS REACHED {max_sum_loss_to_quit_rel=} {self.state.profit=} {rel_profit=}")
+        self.state.vars.pending = "max_sum_loss_to_quit_rel"
+        send_to_telegram(f"QUITTING MAX SUM REL LOSS REACHED {max_sum_loss_to_quit_rel=} {self.state.profit=} {rel_profit=}")
+        self.se.set()
+        return True
+
 if max_sum_profit_to_quit is not None:
 if float(self.state.profit) >= float(max_sum_profit_to_quit):
-self.state.ilog(e=f"QUITTING MAX SUM PROFIT REACHED {max_sum_profit_to_quit=} {self.state.profit=}")
+self.state.ilog(e=f"QUITTING MAX SUM ABS PROFIT REACHED {max_sum_profit_to_quit=} {self.state.profit=} {rel_profit=}")
 self.state.vars.pending = "max_sum_profit_to_quit"
-send_to_telegram(f"QUITTING MAX SUM PROFIT REACHED {max_sum_profit_to_quit=} {self.state.profit=}")
+send_to_telegram(f"QUITTING MAX SUM ABS PROFIT REACHED {max_sum_profit_to_quit=} {self.state.profit=} {rel_profit=}")
 self.se.set()
 return True
 if max_sum_loss_to_quit is not None:
 if float(self.state.profit) < 0 and float(self.state.profit) <= float(max_sum_loss_to_quit):
-self.state.ilog(e=f"QUITTING MAX SUM LOSS REACHED {max_sum_loss_to_quit=} {self.state.profit=}")
+self.state.ilog(e=f"QUITTING MAX SUM ABS LOSS REACHED {max_sum_loss_to_quit=} {self.state.profit=} {rel_profit=}")
 self.state.vars.pending = "max_sum_loss_to_quit"
-send_to_telegram(f"QUITTING MAX SUM LOSS REACHED {max_sum_loss_to_quit=} {self.state.profit=}")
+send_to_telegram(f"QUITTING MAX SUM ABS LOSS REACHED {max_sum_loss_to_quit=} {self.state.profit=} {rel_profit=}")
 self.se.set()
 return True

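The new relative-profit guards mirror the absolute ones above; a minimal standalone sketch of the comparison they perform (the profit series and both threshold values are invented for illustration):

import numpy as np

rel_profit_cum = [0.004, -0.001, 0.0075]   # hypothetical per-trade relative profits
max_sum_profit_to_quit_rel = 0.003         # hypothetical directive values
max_sum_loss_to_quit_rel = -0.005

rel_profit = round(float(np.mean(rel_profit_cum)), 5)
print(rel_profit)                           # 0.0035

if rel_profit >= max_sum_profit_to_quit_rel:
    print("strategy would quit: relative profit target reached")
elif rel_profit < 0 and rel_profit <= max_sum_loss_to_quit_rel:
    print("strategy would quit: relative loss limit reached")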
@ -167,6 +189,10 @@ class StrategyClassicSL(Strategy):

 self.state.ilog(e="BUY: Jde o LONG nakuú nepocitame profit zatim")

+if data.event == TradeEvent.FILL:
+    #zapisujeme last entry price
+    self.state.last_entry_price["long"] = data.price
+
 #ic("vstupujeme do orderupdatebuy")
 print(data)
 #dostavame zde i celkové akutální množství - ukládáme
@ -279,6 +305,10 @@ class StrategyClassicSL(Strategy):

 self.state.ilog(e="SELL: Jde o SHORT nepocitame profit zatim")

+if data.event == TradeEvent.FILL:
+    #zapisujeme last entry price
+    self.state.last_entry_price["short"] = data.price
+
 #update pozic, v trade update je i pocet zbylych pozic
 old_avgp = self.state.avgp
 old_pos = self.state.positions

@ -25,6 +25,8 @@ from threading import Event, current_thread
 import json
 from uuid import UUID
 from rich import print as printnow
+from collections import defaultdict
+
 if PROFILING_NEXT_ENABLED:
 from pyinstrument import Profiler
 profiler = Profiler()
@ -662,6 +664,7 @@ class StrategyState:
 self.time = 0
 #time of last trade processed
 self.last_trade_time = 0
+self.last_entry_price=dict(long=0,short=999)
 self.timeframe = None
 self.runner_id = runner_id
 self.bt = bt
@ -705,13 +708,14 @@ class StrategyState:
 self.sell_l = self.interface.sell_l
 self.cancel_pending_buys = None
 self.iter_log_list = []
+self.dailyBars = defaultdict(dict)
 #celkovy profit (prejmennovat na profit_cum)
 self.profit = 0
 #celkovy relativni profit (obsahuje pole relativnich zisku, z jeho meanu se spocita celkovy rel_profit_cu,)
 self.rel_profit_cum = []
 self.tradeList = []
 #nova promenna pro externi data do ArchiveDetaili, napr. pro zobrazeni v grafu, je zde např. SL history
-self.extData = {}
+self.extData = defaultdict(dict)
 self.mode = None
 self.wait_for_fill = None

96
v2realbot/strategyblocks/activetrade/close/eod_exit.py
Normal file
@ -0,0 +1,96 @@
from v2realbot.strategy.base import StrategyState
from v2realbot.strategy.StrategyOrderLimitVykladaciNormalizedMYSELL import StrategyOrderLimitVykladaciNormalizedMYSELL
from v2realbot.enums.enums import RecordType, StartBarAlign, Mode, Account, Followup
from v2realbot.common.PrescribedTradeModel import Trade, TradeDirection, TradeStatus
from v2realbot.utils.utils import isrising, isfalling, zoneNY, price2dec, print, safe_get, is_still, is_window_open, eval_cond_dict, crossed_down, crossed_up, crossed, is_pivot, json_serial, pct_diff, create_new_bars, slice_dict_lists
from v2realbot.utils.directive_utils import get_conditions_from_configuration
from v2realbot.ml.mlutils import load_model
from v2realbot.common.model import SLHistory
from v2realbot.config import KW
from uuid import uuid4
from datetime import datetime
#import random
import json
import numpy as np
#from icecream import install, ic
from rich import print as printanyway
from threading import Event
import os
from traceback import format_exc
from v2realbot.strategyblocks.activetrade.helpers import insert_SL_history
from v2realbot.strategyblocks.activetrade.close.conditions import dontexit_protection_met, exit_conditions_met
from v2realbot.strategyblocks.activetrade.helpers import get_max_profit_price, get_profit_target_price, get_override_for_active_trade, keyword_conditions_met


def eod_exit_activated(state: StrategyState, data, direction: TradeDirection):
    """
    Function responsible for end-of-day management.

    In the future this will contain optimizations for the closing window
    (sub-windows: first wait for a reduced profit, then for at least breakeven, and only at the very end a forced exit).

    1) for now only: breakeven (whole window) + forced exit (last minute)

    Future idea: an interpolation curve that lowers the profit target as time runs out,
    i.e. with e.g. 60 minutes, split it roughly 4:2 + the last minute:
    - 40 min decreasing profit (current profit is e.g. 0.20 ticks, i.e. 0.20 at minute 40 down to 0 at minute 0) - print(np.interp(atr10, [1, 10,11,12], [0, 1,100,1]))
    - 19 min waiting for breakeven
    - 1 min forced immediate exit
    """
    directive_name = "forced_exit_window_start"
    forced_exit_window_start = get_override_for_active_trade(state, directive_name=directive_name, default_value=safe_get(state.vars, directive_name, None))

    if forced_exit_window_start is None:
        state.ilog(lvl=0,e="Forced exit not required.")
        return False

    directive_name = "forced_exit_window_end"
    forced_exit_window_end = get_override_for_active_trade(state, directive_name=directive_name, default_value=safe_get(state.vars, directive_name, 389))

    if forced_exit_window_start>389:
        state.ilog(lvl=0,e="Forced exit window end max is 389")
        return False

    #TBD - maybe use the actual time (state.time) instead of the trade time? maybe in the future
    if is_window_open(datetime.fromtimestamp(data['updated']).astimezone(zoneNY), forced_exit_window_start, forced_exit_window_end) is False:
        state.ilog(lvl=1,e=f"Forced Exit Window CLOSED", msg=f"{forced_exit_window_start=} {forced_exit_window_end=} ", time=str(datetime.fromtimestamp(data['updated']).astimezone(zoneNY)))
        return False

    # #when the decreasing-profit window ends (the rest is breakeven, the last minute is forced) - default is half the window
    # directive_name = "forced_exit_decreasing_profit_window_end"
    # forced_exit_decreasing_profit_window_end = get_override_for_active_trade(state, directive_name=directive_name, default_value=safe_get(state.vars, directive_name, (forced_exit_window_end-forced_exit_window_end)/2))

    # if forced_exit_decreasing_profit_window_end > forced_exit_window_end-1:
    #     state.ilog(lvl=0,e="Decreasing profit window must be less than window end -1.")
    #     return False

    #TODO as part of profit optimization, add a decreasing-profit-window directive that tracks an interpolated profit target decreasing to 0 and only then switches to breakeven
    #TODO as part of technical optimization, avoid calling is_window_open twice - this runs on every tick
    if is_window_open(datetime.fromtimestamp(data['updated']).astimezone(zoneNY), forced_exit_window_start, forced_exit_window_end-1) is True:
        state.ilog(lvl=1,e=f"Forced Exit Window OPEN - breakeven check", msg=f"{forced_exit_window_start=} {forced_exit_window_end=} ", time=str(datetime.fromtimestamp(data['updated']).astimezone(zoneNY)))

        directive_name = "forced_exit_breakeven_period"
        forced_exit_breakeven_period = get_override_for_active_trade(state, directive_name=directive_name, default_value=safe_get(state.vars, directive_name, True))

        if forced_exit_breakeven_period is False:
            return False

        #for now, except for the last minute, we wait at least for breakeven
        curr_price = float(data['close'])
        #short direction
        if direction == TradeDirection.SHORT and curr_price<=float(state.avgp):
            state.ilog(lvl=1,e=f"Forced Exit - price better than avgp, dir SHORT")
            return True

        if direction == TradeDirection.LONG and curr_price>=float(state.avgp):
            state.ilog(lvl=1,e=f"Forced Exit - price better than avgp, dir LONG")
            return True

        return False
    else:
        state.ilog(lvl=1,e=f"Forced Exit - last minute - EXIT IMMEDIATE")
        return True
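The interpolation idea from the docstring could be prototyped roughly as follows; this is a sketch only, the function name and the window/target values are invented and not part of the commit:

import numpy as np

def interpolated_profit_target(minutes_left: float, window_minutes: float = 40, full_target: float = 0.20) -> float:
    # full target while the whole decreasing-profit window remains, 0 when it has run out
    return float(np.interp(minutes_left, [0, window_minutes], [0.0, full_target]))

print(interpolated_profit_target(20))   # 0.10 - halfway through the window the target is half of 0.20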
@ -9,6 +9,7 @@ from rich import print as printanyway
 from threading import Event
 import os
 from traceback import format_exc
+from v2realbot.strategyblocks.activetrade.close.eod_exit import eod_exit_activated
 from v2realbot.strategyblocks.activetrade.close.conditions import dontexit_protection_met, exit_conditions_met
 from v2realbot.strategyblocks.activetrade.helpers import get_max_profit_price, get_profit_target_price, get_override_for_active_trade, keyword_conditions_met
@ -28,10 +29,6 @@ def eval_close_position(state: StrategyState, data):
         max_price = get_max_profit_price(state, data, TradeDirection.SHORT)
         state.ilog(lvl=1,e=f"Goal price {str(TradeDirection.SHORT)} {goal_price} max price {max_price}")

-        #EOD EXIT - TBD
-        #FORCED EXIT AT END OF DAY
-
         #SL - execution
         if curr_price > state.vars.activeTrade.stoploss_value:
@ -47,6 +44,7 @@ def eval_close_position(state: StrategyState, data):
                 close_position(state=state, data=data, direction=TradeDirection.SHORT, reason="SL REACHED", followup=followup_action)
             return
+
         #REVERSE BASED ON REVERSE CONDITIONS
         if keyword_conditions_met(state, data, direction=TradeDirection.SHORT, keyword=KW.reverse):
             close_position(state=state, data=data, direction=TradeDirection.SHORT, reason="REVERSE COND MET", followup=Followup.REVERSE)
@ -82,6 +80,12 @@ def eval_close_position(state: StrategyState, data):
             if max_price_signal or dontexit_protection_met(state=state, data=data,direction=TradeDirection.SHORT) is False:
                 close_position(state=state, data=data, direction=TradeDirection.SHORT, reason=f"PROFIT or MAXPROFIT REACHED {max_price_signal=}")
                 return

+        #FORCED EXIT AT END OF DAY
+        if eod_exit_activated(state, data, TradeDirection.SHORT):
+            close_position(state=state, data=data, direction=TradeDirection.SHORT, reason="EOD EXIT ACTIVATED")
+            return
+
     #we are long
     elif int(state.positions) > 0:
@ -145,3 +149,8 @@ def eval_close_position(state: StrategyState, data):
             if max_price_signal or dontexit_protection_met(state, data, direction=TradeDirection.LONG) is False:
                 close_position(state=state, data=data, direction=TradeDirection.LONG, reason=f"PROFIT or MAXPROFIT REACHED {max_price_signal=}")
                 return

+        #FORCED EXIT AT END OF DAY
+        if eod_exit_activated(state, data, TradeDirection.LONG):
+            close_position(state=state, data=data, direction=TradeDirection.LONG, reason="EOD EXIT ACTIVATED")
+            return
@ -9,6 +9,7 @@ from v2realbot.common.model import SLHistory
 from v2realbot.config import KW
 from uuid import uuid4
 from datetime import datetime
+from v2realbot.strategyblocks.indicators.helpers import value_or_indicator
 #import random
 import json
 import numpy as np
@ -3,7 +3,7 @@ from v2realbot.strategy.base import StrategyState
 from v2realbot.indicators.indicators import ema, natr, roc
 from v2realbot.indicators.oscillators import rsi
 from traceback import format_exc
-from v2realbot.strategyblocks.indicators.helpers import get_source_series
+from v2realbot.strategyblocks.indicators.helpers import get_source_series, value_or_indicator

 #RSI INDICATOR
 # type = RSI, source = [close, vwap, hlcc4], rsi_length = [14], MA_length = int (optional), on_confirmed_only = [true, false]
@ -22,10 +22,11 @@ def populate_dynamic_RSI_indicator(data, state: StrategyState, name):
     #run on every tick or only on a confirmed bar (on_confirmed_only = true)
     on_confirmed_only = safe_get(options, 'on_confirmed_only', False)
     req_source = safe_get(options, 'source', 'vwap')
-    rsi_length = int(safe_get(options, "length",14))
+    rsi_length = safe_get(options, "length",14)
     rsi_MA_length = safe_get(options, "MA_length", None)
     start = safe_get(options, "start","linear") #linear/sharp

+    rsi_length = int(value_or_indicator(state, rsi_length))
+
     if on_confirmed_only is False or (on_confirmed_only is True and data['confirmed']==1):
         try:
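value_or_indicator is used throughout this commit to let numeric directives (here rsi_length) point at another indicator instead of a literal. Its real implementation is not shown in the diff; the assumed behaviour is roughly:

# Assumed behaviour only - not the actual helper from v2realbot.strategyblocks.indicators.helpers:
def value_or_indicator(state, value):
    # a string that names an existing indicator resolves to that indicator's latest value
    if isinstance(value, str) and value in state.indicators:
        return state.indicators[value][-1]
    # otherwise the literal value is returned unchanged
    return value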
@ -4,7 +4,6 @@ from v2realbot.indicators.indicators import ema, natr, roc
 from v2realbot.strategyblocks.indicators.helpers import get_source_series
 from rich import print as printanyway
 from traceback import format_exc
-from v2realbot.ml.ml import ModelML
 import numpy as np
 from collections import defaultdict
@ -5,19 +5,20 @@ from v2realbot.indicators.indicators import ema, natr, roc
 from v2realbot.strategyblocks.indicators.helpers import get_source_series
 from rich import print as printanyway
 from traceback import format_exc
-from v2realbot.ml.ml import ModelML
 import numpy as np
 from collections import defaultdict

 #indicator allowing to be based on any bar parameter (index, high,open,close,trades,volume, etc.)
-def barparams(state, params):
+def barparams(state, params, name):
     funcName = "barparams"
     if params is None:
         return -2, "params required"
     source = safe_get(params, "source", None)
+    lookback = safe_get(params, "lookback", 1)
     if source is None:
         return -2, "source required"
     try:
-        return 0, state.bars[source][-1]
+        return 0, get_source_series(state, source)[-lookback]
+        #return 0, state.bars[source][-1]
     except Exception as e:
         return -2, str(e)+format_exc()
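A hypothetical indicator configuration illustrating the new lookback directive (the names are illustrative only):

# With lookback the indicator can return an older value instead of only the latest one:
params = {"source": "close", "lookback": 2}   # -> get_source_series(state, "close")[-2], i.e. the previous bar's close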
@ -4,29 +4,36 @@ from v2realbot.indicators.indicators import ema, natr, roc
 from v2realbot.strategyblocks.indicators.helpers import get_source_series
 from rich import print as printanyway
 from traceback import format_exc
-from v2realbot.ml.ml import ModelML
 import numpy as np
 from collections import defaultdict
 from scipy.stats import linregress
+from scipy.fft import fft
+from v2realbot.strategyblocks.indicators.helpers import value_or_indicator

 #the input is either an indicator or a bar parameter
 #on that input it can run basic statistical functions over a sub-array of the last X values
 #supported functions: min, max, mean
-def basestats(state, params):
+def basestats(state, params, name):
     funcName = "basestats"
     #name of indicator or
     source = safe_get(params, "source", None)
     lookback = safe_get(params, "lookback", None)
     func = safe_get(params, "function", None)
+    returns = safe_get(params, "returns", None)

     source_dict = defaultdict(list)
     source_dict[source] = get_source_series(state, source)

+    self = state.indicators[name]
     if lookback is None:
         source_array = source_dict[source]

     else:
+        lookback = int(value_or_indicator(state, lookback))

         try:
             source_array = source_dict[source][-lookback-1:]
+            self = self[-lookback-1:]
         except IndexError:
             source_array = source_dict[source]

@ -69,7 +76,7 @@ def basestats(state, params):
         try:
             np.seterr(all="raise")
             val, _, _, _, _ = linregress(np.arange(len(source_array)), source_array)
-            val = round(val, 4)
+            val = val*1000
         except FloatingPointError:
             return -2, "FloatingPointError"
     #keep it like this until indicators with multiple outputs are supported
@ -82,6 +89,60 @@ def basestats(state, params):
             val = round(val, 4)
         except FloatingPointError:
             return -2, "FloatingPointError"
+    elif func == "fourier":
+        time_series = np.array(source_array)
+        n = len(time_series)
+
+        # Compute the Fourier transform
+        yf = fft(time_series)
+        xf = np.linspace(0.0, 1.0/(2.0), n//2)
+
+        dominant_frequencies = xf[np.argsort(np.abs(yf[:n//2]))[-3:]]
+        state.ilog(lvl=1,e=f"IND {name}:{funcName} 3 dominant freq are {str(dominant_frequencies)}", **params)
+
+        if returns is not None:
+            #return the second most dominant
+            if returns == "second":
+                if len(dominant_frequencies) > 1:
+                    val = dominant_frequencies[-2]
+                else:
+                    val = 0
+        else:
+            #return the most dominant
+            val = float(np.max(dominant_frequencies))
+        return 0, val
+
+    elif func == "maxima":
+        if len(source_array) < 3:
+            return 0, state.bars["high"]
+
+        if len(self) == 0:
+            self_max = 0
+        else:
+            #highest maxima reached so far over the lookback
+            #self_max = float(np.max(self))
+            #try it like this for now; another indicator can then build lines from it?
+            self_max = self[-2]
+
+        state.ilog(lvl=1,e=f"IND {name}:{funcName} {str(self_max)}", **params)

+        # 3 .. 2 going up
+        if source_array[-2] > source_array[-3]:
+            # 2 .. 1 going down - we have a pivot
+            if source_array[-2] > source_array[-1]:
+                ##we are the maximum of the period
+                if source_array[-2] > self_max:
+                    return 0, source_array[-2]
+                else:
+                    return 0, self_max
+            # 2 .. 1 going up - keep the previous value until the turn
+            else:
+                return 0, self_max

+        # 3 .. 2 going down - keep the max
+        else:
+            return 0, self_max
+
     else:
         return -2, "wrong function"
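A standalone sanity check of the dominant-frequency logic (a sketch with a synthetic series, not part of the commit): for a clean sine wave the strongest bin should land near 1/period.

import numpy as np
from scipy.fft import fft

n = 128
series = np.sin(2 * np.pi * np.arange(n) / 16)          # sine with a 16-bar period

yf = fft(series)
xf = np.linspace(0.0, 0.5, n // 2)                      # frequency axis in cycles per bar
dominant = xf[np.argsort(np.abs(yf[:n // 2]))[-3:]]     # 3 strongest bins, strongest last

print(dominant[-1])   # ~0.063, i.e. roughly one cycle every 16 bars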
@ -4,7 +4,6 @@ from v2realbot.indicators.indicators import ema, natr, roc
 from v2realbot.strategyblocks.indicators.helpers import get_source_series, evaluate_directive_conditions
 from rich import print as printanyway
 from traceback import format_exc
-from v2realbot.ml.ml import ModelML
 import numpy as np
 from collections import defaultdict
@ -24,7 +23,7 @@ from collections import defaultdict
 #new conditional indicator, it can contain up to N conditions in the same syntax as signals
 #each condition has a value that is returned when it evaluates to true
 #useful for building binary targets for ML
-def conditional(state, params):
+def conditional(state, params, name):
     funcName = "conditional"
     if params is None:
         return -2, "params required"
@ -42,13 +41,13 @@ def conditional(state, params):
         #the condition is prepared here, we only evaluate it
         cond_dict = condsettings["cond_dict"]
         result, conditions_met = evaluate_directive_conditions(state, cond_dict, "OR")
-        state.ilog(lvl=1,e=f"IND PODMINKA {condname} =OR= {result}", **conditions_met, cond_dict=cond_dict)
+        state.ilog(lvl=1,e=f"IND PODMINKA {name}:{condname} =OR= {result}", **conditions_met, cond_dict=cond_dict)
         if result:
             return 0, true_val

         #OR did not pass, test AND
         result, conditions_met = evaluate_directive_conditions(state, cond_dict, "AND")
-        state.ilog(lvl=1,e=f"IND PODMINKA {condname} =AND= {result}", **conditions_met, cond_dict=cond_dict)
+        state.ilog(lvl=1,e=f"IND PODMINKA {name}:{condname} =AND= {result}", **conditions_met, cond_dict=cond_dict)
         if result:
             return 0, true_val
@ -4,13 +4,12 @@ from v2realbot.indicators.indicators import ema, natr, roc
 from v2realbot.strategyblocks.indicators.helpers import get_source_series
 from rich import print as printanyway
 from traceback import format_exc
-from v2realbot.ml.ml import ModelML
 import numpy as np
 from collections import defaultdict

 #strength, absolute change of parameter between current value and lookback value (n-past)
 #used for example to measure unusual peaks
-def delta(state, params):
+def delta(state, params, name):
     funcName = "delta"
     source = safe_get(params, "source", None)
     lookback = safe_get(params, "lookback",1)
@ -20,5 +19,5 @@ def delta(state, params):
     currval = source_series[-1]
     delta = currval - lookbackval

-    state.ilog(lvl=1,e=f"INSIDE {funcName} {delta} {source=} {lookback=}", currval=currval, lookbackval=lookbackval, **params)
+    state.ilog(lvl=1,e=f"INSIDE {name}:{funcName} {delta} {source=} {lookback=}", currval=currval, lookbackval=lookbackval, **params)
     return 0, delta
@ -1,43 +0,0 @@
-from v2realbot.utils.utils import isrising, isfalling,zoneNY, price2dec, print, safe_get, is_still, is_window_open, eval_cond_dict, crossed_down, crossed_up, crossed, is_pivot, json_serial, pct_diff, create_new_bars, slice_dict_lists
-from v2realbot.strategy.base import StrategyState
-from v2realbot.indicators.indicators import ema, natr, roc
-from v2realbot.strategyblocks.indicators.helpers import get_source_series
-from rich import print as printanyway
-from traceback import format_exc
-from v2realbot.ml.ml import ModelML
-import numpy as np
-from collections import defaultdict
-
-#abs/rel divergence of two indicators
-def divergence(state, params):
-    funcName = "indicatorDivergence"
-    source1 = safe_get(params, "source1", None)
-    source1_series = get_source_series(state, source1)
-    source2 = safe_get(params, "source2", None)
-    source2_series = get_source_series(state, source2)
-    mode = safe_get(params, "type")
-    state.ilog(lvl=0,e=f"INSIDE {funcName} {source1=} {source2=} {mode=}", **params)
-    val = 0
-    if mode == "abs":
-        val = round(abs(float(source1_series[-1]) - float(source2_series[-1])),4)
-    elif mode == "absn":
-        val = round((abs(float(source1_series[-1]) - float(source2_series[-1])))/float(source1_series[-1]),4)
-    elif mode == "rel":
-        val = round(float(source1_series[-1]) - float(source2_series[-1]),4)
-    elif mode == "reln":
-        val = round((float(source1_series[-1]) - float(source2_series[-1]))/float(source1_series[-1]),4)
-    elif mode == "pctabs":
-        val = pct_diff(num1=float(source1_series[-1]),num2=float(source2_series[-1]), absolute=True)
-    elif mode == "pct":
-        val = pct_diff(num1=float(source1_series[-1]),num2=float(source2_series[-1]))
-    return 0, val
-
-#model - loaded model instance
-#seq - input sequence
@ -4,13 +4,12 @@ from v2realbot.indicators.indicators import ema as ext_ema
 from v2realbot.strategyblocks.indicators.helpers import get_source_series
 from rich import print as printanyway
 from traceback import format_exc
-from v2realbot.ml.ml import ModelML
 import numpy as np
 from collections import defaultdict
 from v2realbot.strategyblocks.indicators.helpers import value_or_indicator
 #strength, absolute change of parameter between current value and lookback value (n-past)
 #used for example to measure unusual peaks
-def ema(state, params):
+def ema(state, params, name):
     funcName = "ema"
     source = safe_get(params, "source", None)
     lookback = safe_get(params, "lookback",14)
@ -22,5 +21,5 @@ def ema(state, params):
     ema_value = ext_ema(source_series, lookback)
     val = round(ema_value[-1],4)

-    state.ilog(lvl=1,e=f"INSIDE {funcName} {val} {source=} {lookback=}", **params)
+    state.ilog(lvl=1,e=f"INSIDE {name}:{funcName} {val} {source=} {lookback=}", **params)
     return 0, val
@ -1,6 +1,10 @@
-from v2realbot.utils.utils import isrising, isfalling,zoneNY, price2dec, print, safe_get, is_still, is_window_open, eval_cond_dict, crossed_down, crossed_up, crossed, is_pivot, json_serial, pct_diff, create_new_bars, slice_dict_lists
 from v2realbot.strategy.base import StrategyState
 import numpy as np
+from rich import print as printanyway
+from traceback import format_exc
+import v2realbot.utils.utils as utls
+# from v2realbot.utils.utils import isrising, isfalling,zoneNY, price2dec, print, safe_get, is_still, is_window_open, eval_cond_dict, crossed_down, crossed_up, crossed, is_pivot, json_serial, pct_diff, create_new_bars, slice_dict_lists
+

 #allows executing an expression - warning: not sanitized and for now it also runs in the global scope
 #secure this before any non-private use
@ -9,24 +13,30 @@ import numpy as np
 #eval now also handles user-defined functions, string operations and control statements

 #theoretically SYMPY could also be used - individual symbols with specific behaviour can be created there
-def expression(state: StrategyState, params):
-    funcName = "expression"
-    #indicator name
-    operation = safe_get(params, "expression", None)
-    if operation is None :
-        return -2, "required param missing"
-    state.ilog(lvl=0,e=f"BEFORE {funcName} {operation=}", **params)
-    #plain eval for now
-    val = eval(operation, {'state': state, 'np': np}, state.ind_mapping)
-    if not np.isfinite(val):
-        val = 0
-    #val = ne.evaluate(operation, state.ind_mapping)
-    state.ilog(lvl=1,e=f"IND {funcName} {operation=} res:{val}", **params)
+def expression(state: StrategyState, params, name):
+    try:
+        funcName = "expression"
+        #indicator name
+        operation = utls.safe_get(params, "expression", None)
+
+        if operation is None :
+            return -2, "required param missing"
+
+        state.ilog(lvl=0,e=f"BEFORE {name}:{funcName} {operation=}", **params)
+
+        #plain eval for now
+        val = eval(operation, {'state': state, 'np': np, 'utls': utls}, state.ind_mapping)
+        #printanyway(val)
+
+        if not np.isfinite(val):
+            val = 0
+        #val = ne.evaluate(operation, state.ind_mapping)
+
+        state.ilog(lvl=1,e=f"IND {name}:{funcName} {operation=} res:{val}", **params)
+    except Exception as e:
+        printanyway(name + str(e) + format_exc())
+        raise e
     return 0, val
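For readers following the expression indicator: eval receives state.ind_mapping as its locals, so an expression directive can reference other indicators by name. An illustration with invented names and values:

import numpy as np

ind_mapping = {"ema_fast": 29.91, "ema_slow": 29.74, "atr10": 0.05}   # stand-in for state.ind_mapping
operation = "(ema_fast - ema_slow) / atr10"
val = eval(operation, {"np": np}, ind_mapping)
print(val)   # ~3.4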
@ -4,22 +4,23 @@ import v2realbot.indicators.moving_averages as mi
 from v2realbot.strategyblocks.indicators.helpers import get_source_series
 from rich import print as printanyway
 from traceback import format_exc
-from v2realbot.ml.ml import ModelML
 import numpy as np
 from collections import defaultdict
 from v2realbot.strategyblocks.indicators.helpers import value_or_indicator
+# from talib import BBANDS, MACD, RSI, MA_Type


 #IMPLEMENTS different types of moving averages in package v2realbot.indicators.moving_averages
-def ma(state, params):
+def ma(state, params, name):
     funcName = "ma"
     type = safe_get(params, "type", "ema")
     source = safe_get(params, "source", None)
     lookback = safe_get(params, "lookback",14)
     start = safe_get(params, "start","linear") #linear/sharp
+    defval = safe_get(params, "defval",0)
     #lookback can also be a reference to an indicator, in which case we take its value
     lookback = int(value_or_indicator(state, lookback))
+    defval = int(value_or_indicator(state, defval))

     source_series = get_source_series(state, source)
@ -34,7 +35,14 @@ def ma(state, params):
     ma_function = eval(type)

     ma_value = ma_function(source_series, lookback)
-    val = round(ma_value[-1],4)

-    state.ilog(lvl=1,e=f"INSIDE {funcName} {val} {type=} {source=} {lookback=}", **params)
+    if not np.isfinite(ma_value[-1]):
+        val = defval
+    else:
+        val = round(ma_value[-1],4)
+
+    if val == 0:
+        val = defval
+
+    state.ilog(lvl=1,e=f"INSIDE {name}:{funcName} {val} {type=} {source=} {lookback=}", **params)
     return 0, val
@ -3,7 +3,8 @@ from v2realbot.strategy.base import StrategyState
 from v2realbot.strategyblocks.indicators.helpers import get_source_series, value_or_indicator

 #allows basic mathematical operators on one or more indicators (add two indicators, add a value to an indicator etc.)
-def mathop(state, params):
+#REPLACED by EXPRESSION
+def mathop(state, params, name):
     funcName = "mathop"
     #indicator name
     source1 = safe_get(params, "source1", None)
@ -24,7 +25,7 @@ def mathop(state, params):
         val = round(float(source1_series[-1] * druhy),4)
     else:
         return -2, "unknow operator"
-    state.ilog(lvl=1,e=f"INSIDE {funcName} {source1=} {source2=} {val} {druhy=}", **params)
+    state.ilog(lvl=1,e=f"INSIDE {name}:{funcName} {source1=} {source2=} {val} {druhy=}", **params)
     return 0, val
@ -4,11 +4,10 @@ from v2realbot.indicators.indicators import ema, natr, roc
 from v2realbot.strategyblocks.indicators.helpers import get_source_series
 from rich import print as printanyway
 from traceback import format_exc
-from v2realbot.ml.ml import ModelML
 import numpy as np
 from collections import defaultdict

-def model(state, params):
+def model(state, params, ind_name):
     funcName = "model"
     if params is None:
         return -2, "params required"
@ -4,17 +4,16 @@ from v2realbot.indicators.indicators import ema, natr, roc
 from v2realbot.strategyblocks.indicators.helpers import get_source_series
 from rich import print as printanyway
 from traceback import format_exc
-from v2realbot.ml.ml import ModelML
 import numpy as np
 from collections import defaultdict

 #WIP -
 #testing custom indicator CODE
-def opengap(state, params):
+def opengap(state, params, name):
     funcName = "opengap"
     param1 = safe_get(params, "param1")
     param2 = safe_get(params, "param2")
-    state.ilog(lvl=0,e=f"INSIDE {funcName} {param1=} {param2=}", **params)
+    state.ilog(lvl=0,e=f"INSIDE {name}:{funcName} {param1=} {param2=}", **params)
     last_close = 28.45
     today_open = 29.45
     val = pct_diff(last_close, today_open)
@ -4,14 +4,13 @@ from v2realbot.indicators.indicators import ema, natr, roc
 from v2realbot.strategyblocks.indicators.helpers import get_source_series
 from rich import print as printanyway
 from traceback import format_exc
-from v2realbot.ml.ml import ModelML
 import numpy as np
 from collections import defaultdict
 import bisect

 #strength, absolute change of parameter between current value and lookback value (n-past)
 #used for example to measure unusual peaks
-def sameprice(state, params):
+def sameprice(state, params, name):
     funcName = "sameprice"
     typ = safe_get(params, "type", None)
@ -40,13 +39,13 @@ def sameprice(state, params):

     #this is the daily high
     if pozice_prvniho_vetsiho == -1:
-        state.ilog(lvl=1,e=f"INSIDE {funcName} {typ} {pozice_prvniho_vetsiho=} vracime 1")
+        state.ilog(lvl=1,e=f"INSIDE {name}:{funcName} {typ} {pozice_prvniho_vetsiho=} vracime 1")
         return 0, celkova_delka

     delka_k_predchozmu = celkova_delka - pozice_prvniho_vetsiho
     normalizovano = delka_k_predchozmu/celkova_delka

-    state.ilog(lvl=1,e=f"INSIDE {funcName} {typ} {pozice_prvniho_vetsiho=} {celkova_delka=} {delka_k_predchozmu=} {normalizovano=}", pozice_prvniho_vetsiho=pozice_prvniho_vetsiho, celkova_delka=celkova_delka, delka_k_predchozmu=delka_k_predchozmu, **params)
+    state.ilog(lvl=1,e=f"INSIDE {name}:{funcName} {typ} {pozice_prvniho_vetsiho=} {celkova_delka=} {delka_k_predchozmu=} {normalizovano=}", pozice_prvniho_vetsiho=pozice_prvniho_vetsiho, celkova_delka=celkova_delka, delka_k_predchozmu=delka_k_predchozmu, **params)

     return 0, delka_k_predchozmu
@ -4,12 +4,11 @@ from v2realbot.indicators.indicators import ema, natr, roc
 from v2realbot.strategyblocks.indicators.helpers import get_source_series
 from rich import print as printanyway
 from traceback import format_exc
-from v2realbot.ml.ml import ModelML
 import numpy as np
 from collections import defaultdict

 #rate of change - last value of source indicator vs lookback value of lookback_priceline indicator
-def slope(state, params):
+def slope(state, params, name):
     funcName = "slope"
     source = safe_get(params, "source", None)
     source_series = get_source_series(state, source)
@ -31,5 +30,5 @@ def slope(state, params):
     slope = ((currval - lookbackprice)/abs(lookbackprice))*100
     #slope = round(slope, 4)

-    state.ilog(lvl=1,e=f"INSIDE {funcName} {slope} {source=} {lookback=}", currval_source=currval, lookbackprice=lookbackprice, lookbacktime=lookbacktime, **params)
+    state.ilog(lvl=1,e=f"INSIDE {name}:{funcName} {slope} {source=} {lookback=}", currval_source=currval, lookbackprice=lookbackprice, lookbacktime=lookbacktime, **params)
     return 0, slope
@ -5,13 +5,12 @@ from v2realbot.indicators.moving_averages import vwma as ext_vwma
 from v2realbot.strategyblocks.indicators.helpers import get_source_series
 from rich import print as printanyway
 from traceback import format_exc
-from v2realbot.ml.ml import ModelML
 import numpy as np
 from collections import defaultdict
 from v2realbot.strategyblocks.indicators.helpers import value_or_indicator

 # Volume (or reference_source) Weighted Moving Average
-def vwma(state, params):
+def vwma(state, params, name):
     funcName = "vwma"
     source = safe_get(params, "source", None)
     ref_source = safe_get(params, "ref_source", "volume")
@ -34,5 +33,5 @@ def vwma(state, params):
     vwma_value = ext_vwma(source_series, ref_source_series, lookback)
     val = round(vwma_value[-1],4)

-    state.ilog(lvl=1,e=f"INSIDE {funcName} {val} {source=} {ref_source=} {lookback=}", **params)
+    state.ilog(lvl=1,e=f"INSIDE {name}:{funcName} {val} {source=} {ref_source=} {lookback=}", **params)
     return 0, val
@ -133,7 +133,7 @@ def populate_dynamic_custom_indicator(data, state: StrategyState, name):

                 subtype = "ci."+subtype+"."+subtype
                 custom_function = eval(subtype)
-                res_code, new_val = custom_function(state, custom_params)
+                res_code, new_val = custom_function(state, custom_params, name)
                 if res_code == 0:
                     state.indicators[name][-1-save_to_past]=new_val
                     state.ilog(lvl=1,e=f"IND {name} {subtype} VAL FROM FUNCTION: {new_val}", lastruntime=state.vars.indicators[name]["last_run_time"], lastrunindex=state.vars.indicators[name]["last_run_index"], save_to_past=save_to_past)
@ -159,7 +159,7 @@ def populate_dynamic_custom_indicator(data, state: StrategyState, name):
         else:
             state.ilog(lvl=0,e=f"IND {name} {subtype} COND NOT READY: {msg}")

-    #not time to run
+    #not time to run - copy last value
     if len(state.indicators[name]) >= 2:
         state.indicators[name][-1]=state.indicators[name][-2]
@ -71,11 +71,22 @@ def get_source_or_MA(state, indicator):
     except KeyError:
         return state.bars[indicator]

-def get_source_series(state, source):
-    try:
-        return state.bars[source]
-    except KeyError:
-        return state.indicators[source]
+def get_source_series(state: StrategyState, source: str):
+    """
+    Besides a key into bars or indicators we also support compound sources separated by "|",
+    e.g. dailyBars|close takes the series state.dailyBars["close"].
+    """
+    split_index = source.find("|")
+    if split_index == -1:
+        try:
+            return state.bars[source]
+        except KeyError:
+            return state.indicators[source]
+    else:
+        dict_name = source[:split_index]
+        key = source[split_index + 1:]
+        return getattr(state, dict_name)[key]

 #THESE should probably go into utils
 #returns True if the given indicator crossed the threshold downwards
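A quick illustration of the compound-source syntax introduced above (the key names are examples):

# "dailyBars|close" resolves to state.dailyBars["close"], while a plain "close"
# still falls back to state.bars and then state.indicators.
source = "dailyBars|close"
split_index = source.find("|")
dict_name, key = source[:split_index], source[split_index + 1:]
print(dict_name, key)   # dailyBars close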
@ -4,7 +4,6 @@ from v2realbot.enums.enums import RecordType, StartBarAlign, Mode, Account, Foll
 from v2realbot.common.PrescribedTradeModel import Trade, TradeDirection, TradeStatus
 from v2realbot.utils.utils import isrising, isfalling,zoneNY, price2dec, print, safe_get, is_still, is_window_open, eval_cond_dict, crossed_down, crossed_up, crossed, is_pivot, json_serial, pct_diff, create_new_bars, slice_dict_lists
 from v2realbot.utils.directive_utils import get_conditions_from_configuration
-from v2realbot.ml.mlutils import load_model
 from v2realbot.common.model import SLHistory
 from v2realbot.config import KW
 from uuid import uuid4
@ -9,6 +9,7 @@ from v2realbot.common.model import SLHistory
 from v2realbot.config import KW
 from uuid import uuid4
 from datetime import datetime
+from v2realbot.strategyblocks.indicators.helpers import value_or_indicator
 #import random
 import json
 import numpy as np
@ -106,6 +107,8 @@ def common_go_preconditions_check(state, data, signalname: str, options: dict):
         return False

     next_signal_offset = safe_get(options, "next_signal_offset_from_last_exit",safe_get(state.vars, "next_signal_offset_from_last_exit",0))
+    #can also be an indicator
+    next_signal_offset = int(value_or_indicator(state, next_signal_offset))
+
     if state.vars.last_exit_index is not None:
         index_to_compare = int(state.vars.last_exit_index)+int(next_signal_offset)
92
v2realbot/utils/historicals.py
Normal file
@ -0,0 +1,92 @@
from alpaca.data.historical import StockHistoricalDataClient, CryptoHistoricalDataClient
from alpaca.data.requests import StockLatestQuoteRequest, StockBarsRequest, StockTradesRequest, StockSnapshotRequest
from alpaca.data import Quote, Trade, Snapshot, Bar
from alpaca.data.models import BarSet, QuoteSet, TradeSet
from alpaca.data.timeframe import TimeFrame, TimeFrameUnit
from v2realbot.utils.utils import zoneNY
from v2realbot.config import ACCOUNT1_PAPER_API_KEY, ACCOUNT1_PAPER_SECRET_KEY
from alpaca.data.enums import DataFeed
from datetime import datetime, timedelta
import pandas as pd
from rich import print
from collections import defaultdict
from pandas import to_datetime
from msgpack.ext import Timestamp

def convert_daily_bars(daily_bars):
    """Converts a list of daily bars into a dictionary with the specified keys.

    Args:
        daily_bars: A list of daily bars, where each bar is a dictionary with the
            following keys:
            * c: Close price
            * h: High price
            * l: Low price
            * n: Number of trades
            * o: Open price
            * t: Time in UTC (ISO 8601 format)
            * v: Volume
            * vw: VWAP

    Returns:
        A dictionary with the following keys:
        * high: A list of high prices
        * low: A list of low prices
        * volume: A list of volumes
        * close: A list of close prices
        * hlcc4: A list of HLCC4 indicators
        * open: A list of open prices
        * time: A list of times in UTC (ISO 8601 format)
        * trades: A list of trade counts
        * resolution: A list of resolutions (all set to 'D')
        * confirmed: A list of confirmation flags (all set to 1)
        * vwap: A list of VWAP values
        * updated: A list of update times (same as time)
        * index: A list of integers (from 0 to the length of the list of daily bars)
    """
    bars = defaultdict(list)
    for i in range(len(daily_bars)):
        bar = daily_bars[i]

        # Calculate the HLCC4 indicator
        hlcc4 = (bar['h'] + bar['l'] + bar['c'] + bar['o']) / 4
        datum = to_datetime(bar['t'], utc=True)

        #or possible homogenization with the online stream
        #datum = Timestamp.from_unix(datum.timestamp())

        # Add the bar to the dictionary
        bars['high'].append(bar['h'])
        bars['low'].append(bar['l'])
        bars['volume'].append(bar['v'])
        bars['close'].append(bar['c'])
        bars['hlcc4'].append(hlcc4)
        bars['open'].append(bar['o'])
        bars['time'].append(datum)
        bars['trades'].append(bar['n'])
        bars['resolution'].append('D')
        bars['confirmed'].append(1)
        bars['vwap'].append(bar['vw'])
        bars['updated'].append(datum)
        bars['index'].append(i)

    return bars

def get_last_close():
    pass

def get_todays_open():
    pass

##returns historical bars in our format
def get_historical_bars(symbol: str, time_from: datetime, time_to: datetime, timeframe: TimeFrame):
    stock_client = StockHistoricalDataClient(ACCOUNT1_PAPER_API_KEY, ACCOUNT1_PAPER_SECRET_KEY, raw_data=True)
    # snapshotRequest = StockSnapshotRequest(symbol_or_symbols=[symbol], feed=DataFeed.SIP)
    # snapshotResponse = stock_client.get_stock_snapshot(snapshotRequest)
    # print("snapshot", snapshotResponse)

    bar_request = StockBarsRequest(symbol_or_symbols=symbol,timeframe=timeframe, start=time_from, end=time_to, feed=DataFeed.SIP)
    bars: BarSet = stock_client.get_stock_bars(bar_request)
    ##print("puvodni bars", bars["BAC"])
    return convert_daily_bars(bars[symbol])
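A minimal usage sketch with invented values, showing the shape convert_daily_bars expects and produces. Note that hlcc4 as written averages the open rather than double-counting the close, so the value is effectively OHLC4:

sample = [
    {'c': 29.9, 'h': 30.2, 'l': 29.5, 'n': 1200, 'o': 29.7,
     't': '2023-11-01T05:00:00Z', 'v': 1000000, 'vw': 29.85},
]
bars = convert_daily_bars(sample)
print(bars['hlcc4'][0])    # (30.2 + 29.5 + 29.9 + 29.7) / 4 = 29.825
print(bars['resolution'])  # ['D']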
16
v2realbot/utils/profitloss.py
Normal file
@ -0,0 +1,16 @@

def calculate_relative_profit_loss(entry_price, exit_price):
    """Calculates the relative profit/loss in percent.

    Args:
        entry_price: The entry price.
        exit_price: The exit price.

    Returns:
        The relative profit/loss in percent.
    """
    relative_profit_loss = (exit_price - entry_price) / entry_price * 100
    return relative_profit_loss
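Usage sketch; the function itself is direction-agnostic, so interpreting the sign for short positions is left to the caller (an assumption, not shown in the commit):

print(calculate_relative_profit_loss(100.0, 101.5))   # 1.5  -> price rose 1.5 %
print(calculate_relative_profit_loss(100.0, 98.0))    # -2.0 -> price fell 2 % (a gain for a short, handled by the caller)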
@ -152,12 +152,12 @@ def is_pivot(source: list, leg_number: int, type: str = "A"):
     right_leg = source[-leg_number:]

     if type == "A":
-        if isrising(left_leg) and isfalling(right_leg):
+        if isrisingc(left_leg) and isfallingc(right_leg):
             return True
         else:
             return False
     elif type == "V":
-        if isfalling(left_leg) and isrising(right_leg):
+        if isfallingc(left_leg) and isrisingc(right_leg):
             return True
         else:
             return False
@ -165,6 +165,7 @@ def is_pivot(source: list, leg_number: int, type: str = "A"):
         print("Unknown type")
         return False

+
 def crossed_up(threshold, list):
     """check if threshold has crossed up the last value in list"""
     try:
@ -324,6 +325,7 @@ def json_serial(obj):
     """

     type_map = {
+        pd.Timestamp: lambda obj: obj.timestamp(),
         datetime: lambda obj: obj.timestamp(),
         UUID: lambda obj: str(obj),
         Enum: lambda obj: str(obj),
|||||||
Reference in New Issue
Block a user