Compare commits: feature/ve...researchad (5 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 8868c6271b | |
| | 8e89347f5c | |
| | 1659cc7a6e | |
| | 05b7725a25 | |
| | 3de7d23009 | |
@@ -1,21 +1,34 @@
absl-py==2.0.0
alpaca==1.0.0
alpaca-py==0.7.1
alpaca-py==0.18.1
altair==4.2.2
annotated-types==0.6.0
anyio==3.6.2
appdirs==1.4.4
appnope==0.1.3
APScheduler==3.10.4
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
arrow==1.3.0
asttokens==2.2.1
astunparse==1.6.3
async-lru==2.0.4
attrs==22.2.0
Babel==2.15.0
beautifulsoup4==4.12.3
better-exceptions==0.3.3
bleach==6.0.0
blinker==1.5
bottle==0.12.25
cachetools==5.3.0
CD==1.1.0
certifi==2022.12.7
cffi==1.16.0
chardet==5.1.0
charset-normalizer==3.0.1
click==8.1.3
colorama==0.4.6
comm==0.1.4
contourpy==1.0.7
cycler==0.11.0
dash==2.9.1
@@ -23,90 +36,189 @@ dash-bootstrap-components==1.4.1
dash-core-components==2.0.0
dash-html-components==2.0.0
dash-table==5.0.0
dateparser==1.1.8
debugpy==1.8.1
decorator==5.1.1
defusedxml==0.7.1
dill==0.3.7
dm-tree==0.1.8
entrypoints==0.4
exceptiongroup==1.1.3
executing==1.2.0
fastapi==0.95.0
fastapi==0.109.2
fastjsonschema==2.19.1
filelock==3.13.1
Flask==2.2.3
flatbuffers==23.5.26
fonttools==4.39.0
fpdf2==2.7.6
fqdn==1.5.1
gast==0.4.0
gitdb==4.0.10
GitPython==3.1.31
google-auth==2.23.0
google-auth-oauthlib==1.0.0
google-pasta==0.2.0
greenlet==3.0.3
grpcio==1.58.0
h11==0.14.0
h5py==3.9.0
h5py==3.10.0
html2text==2024.2.26
httpcore==1.0.5
httpx==0.27.0
humanize==4.9.0
icecream==2.1.3
idna==3.4
imageio==2.31.6
importlib-metadata==6.1.0
ipykernel==6.29.4
ipython==8.17.2
ipywidgets==8.1.1
isoduration==20.11.0
itables==2.0.1
itsdangerous==2.1.2
jax==0.4.23
jaxlib==0.4.23
jedi==0.19.1
Jinja2==3.1.2
joblib==1.3.2
jsonschema==4.17.3
keras==2.13.1
json5==0.9.25
jsonpointer==2.4
jsonschema==4.22.0
jsonschema-specifications==2023.12.1
jupyter-events==0.10.0
jupyter-lsp==2.2.5
jupyter_client==8.6.1
jupyter_core==5.7.2
jupyter_server==2.14.0
jupyter_server_terminals==0.5.3
jupyterlab==4.1.8
jupyterlab-widgets==3.0.9
jupyterlab_pygments==0.3.0
jupyterlab_server==2.27.1
kaleido==0.2.1
keras==3.0.2
keras-core==0.1.7
keras-nightly==3.0.3.dev2024010203
keras-nlp-nightly==0.7.0.dev2024010203
keras-tcn @ git+https://github.com/drew2323/keras-tcn.git@4bddb17a02cb2f31c9fe2e8f616b357b1ddb0e11
kiwisolver==1.4.4
libclang==16.0.6
lightweight-charts @ git+https://github.com/drew2323/lightweight-charts-python@10fd42f785182edfbf6b46a19a4ef66e85985a23
llvmlite==0.39.1
Markdown==3.4.3
markdown-it-py==2.2.0
MarkupSafe==2.1.2
matplotlib==3.7.1
matplotlib==3.8.2
matplotlib-inline==0.1.6
mdurl==0.1.2
mistune==3.0.2
ml-dtypes==0.3.1
mlroom @ git+https://github.com/drew2323/mlroom.git@692900e274c4e0542d945d231645c270fc508437
mplfinance==0.12.10b0
msgpack==1.0.4
mypy-extensions==1.0.0
namex==0.0.7
nbclient==0.10.0
nbconvert==7.16.4
nbformat==5.10.4
nest-asyncio==1.6.0
newtulipy==0.4.6
numpy==1.24.2
notebook_shim==0.2.4
numba==0.56.4
numpy==1.23.5
oauthlib==3.2.2
opt-einsum==3.3.0
orjson==3.9.10
overrides==7.7.0
packaging==23.0
pandas==1.5.3
pandas==2.2.1
pandocfilters==1.5.1
param==1.13.0
parso==0.8.3
patsy==0.5.6
pexpect==4.8.0
Pillow==9.4.0
plotly==5.13.1
platformdirs==4.2.0
plotly==5.22.0
prometheus_client==0.20.0
prompt-toolkit==3.0.39
proto-plus==1.22.2
protobuf==3.20.3
proxy-tools==0.1.0
psutil==5.9.8
ptyprocess==0.7.0
pure-eval==0.2.2
pyarrow==11.0.0
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycparser==2.22
pyct==0.5.0
pydantic==1.10.5
pydantic==2.6.4
pydantic_core==2.16.3
pydeck==0.8.0
Pygments==2.14.0
pyinstrument==4.5.3
Pympler==1.0.1
pyobjc-core==10.3
pyobjc-framework-Cocoa==10.3
pyobjc-framework-Security==10.3
pyobjc-framework-WebKit==10.3
pyparsing==3.0.9
pyrsistent==0.19.3
pysos==1.3.0
python-dateutil==2.8.2
python-dotenv==1.0.0
python-json-logger==2.0.7
python-multipart==0.0.6
pytz==2022.7.1
pytz-deprecation-shim==0.1.0.post0
pyviz-comms==2.2.1
PyWavelets==1.5.0
pywebview==5.1
PyYAML==6.0
pyzmq==25.1.2
referencing==0.35.1
regex==2023.10.3
requests==2.31.0
requests-oauthlib==1.3.1
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rich==13.3.1
rpds-py==0.18.0
rsa==4.9
scikit-learn==1.3.1
schedule==1.2.1
scikit-learn==1.3.2
scipy==1.11.2
seaborn==0.12.2
semver==2.13.0
Send2Trash==1.8.3
six==1.16.0
smmap==5.0.0
sniffio==1.3.0
soupsieve==2.5
SQLAlchemy==2.0.27
sseclient-py==1.7.2
starlette==0.26.1
stack-data==0.6.3
starlette==0.36.3
statsmodels==0.14.1
streamlit==1.20.0
structlog==23.1.0
TA-Lib==0.4.28
tb-nightly==2.16.0a20240102
tenacity==8.2.2
tensorboard==2.13.0
tensorboard==2.15.1
tensorboard-data-server==0.7.1
tensorflow==2.13.0
tensorflow-estimator==2.13.0
tensorflow-addons==0.23.0
tensorflow-estimator==2.15.0
tensorflow-io-gcs-filesystem==0.34.0
termcolor==2.3.0
terminado==0.18.1
tf-estimator-nightly==2.14.0.dev2023080308
tf-nightly==2.16.0.dev20240101
tf_keras-nightly==2.16.0.dev2023123010
threadpoolctl==3.2.0
tinycss2==1.3.0
tinydb==4.7.1
tinydb-serialization==2.1.0
tinyflux==0.4.0
@@ -115,15 +227,24 @@ tomli==2.0.1
toolz==0.12.0
tornado==6.2
tqdm==4.65.0
typing_extensions==4.5.0
traitlets==5.13.0
typeguard==2.13.3
types-python-dateutil==2.9.0.20240316
typing_extensions==4.9.0
tzdata==2023.2
tzlocal==4.3
uri-template==1.3.0
urllib3==1.26.14
uvicorn==0.21.1
#-e git+https://github.com/drew2323/v2trading.git@940348412f67ecd551ef8d0aaedf84452abf1320#egg=v2realbot
-e git+https://github.com/drew2323/v2trading.git@78f2162d59753c243e374dd52c305e111affebb9#egg=v2realbot
validators==0.20.0
vectorbtpro @ file:///Users/davidbrazda/Downloads/vectorbt.pro-2024.2.22
wcwidth==0.2.9
webcolors==1.13
webencodings==0.5.1
websockets==10.4
websocket-client==1.7.0
websockets==11.0.3
Werkzeug==2.2.3
wrapt==1.15.0
widgetsnbextension==4.0.9
wrapt==1.14.1
zipp==3.15.0
41438 research/basic.ipynb: file diff suppressed because one or more lines are too long
410 research/get_trades_at_once.ipynb (new file)
@@ -0,0 +1,410 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Loading trades and vectorized aggregation\n",
"Describes how to fetch trades (remote or cached) and use the new vectorized aggregation to build bars of a given type (time, volume, dollar) and resolution.\n",
"\n",
"`fetch_trades_parallel` fetches trades for a given symbol and interval; it can also filter by trade conditions and minimum size. Returns `trades_df`.\n",
"`aggregate_trades` accepts `trades_df`, a resolution, and a bar type (VOLUME, TIME, DOLLAR), and returns the aggregated OHLCV dataframe `ohlcv_df`."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">Activating profile profile1\n",
"</pre>\n"
],
"text/plain": [
"Activating profile profile1\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"trades_df-BAC-2024-01-11T09:30:00-2024-01-12T16:00:00.parquet\n",
"trades_df-SPY-2024-01-01T09:30:00-2024-05-14T16:00:00.parquet\n",
"ohlcv_df-BAC-2024-01-11T09:30:00-2024-01-12T16:00:00.parquet\n",
"ohlcv_df-SPY-2024-01-01T09:30:00-2024-05-14T16:00:00.parquet\n"
]
}
],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"from numba import jit\n",
"from alpaca.data.historical import StockHistoricalDataClient\n",
"from v2realbot.config import ACCOUNT1_PAPER_API_KEY, ACCOUNT1_PAPER_SECRET_KEY, DATA_DIR\n",
"from alpaca.data.requests import StockTradesRequest\n",
"from v2realbot.enums.enums import BarType\n",
"import time\n",
"from datetime import datetime\n",
"from v2realbot.utils.utils import parse_alpaca_timestamp, ltp, zoneNY, send_to_telegram, fetch_calendar_data\n",
"import pyarrow\n",
"from v2realbot.loader.aggregator_vectorized import fetch_daily_stock_trades, fetch_trades_parallel, generate_time_bars_nb, aggregate_trades\n",
"import vectorbtpro as vbt\n",
"import v2realbot.utils.config_handler as cfh\n",
"\n",
"vbt.settings.set_theme(\"dark\")\n",
"vbt.settings['plotting']['layout']['width'] = 1280\n",
"vbt.settings.plotting.auto_rangebreaks = True\n",
"# Set the option to display with pagination\n",
"pd.set_option('display.notebook_repr_html', True)\n",
"pd.set_option('display.max_rows', 20) # Number of rows per page\n",
"# pd.set_option('display.float_format', '{:.9f}'.format)\n",
"\n",
"\n",
"#trade filtering\n",
"exclude_conditions = cfh.config_handler.get_val('AGG_EXCLUDED_TRADES') #standard ['C','O','4','B','7','V','P','W','U','Z','F']\n",
"minsize = 100\n",
"\n",
"symbol = \"SPY\"\n",
"#datetimes in zoneNY\n",
"day_start = datetime(2024, 1, 1, 9, 30, 0)\n",
"day_stop = datetime(2024, 1, 14, 16, 00, 0)\n",
"day_start = zoneNY.localize(day_start)\n",
"day_stop = zoneNY.localize(day_stop)\n",
"#filename of the trades_df parquet; dates are in ISO format but without the time zone part\n",
"dir = DATA_DIR + \"/notebooks/\"\n",
"#the parquet interval cache reflects exclude-conditions and minsize filtering\n",
"file_trades = dir + f\"trades_df-{symbol}-{day_start.strftime('%Y-%m-%dT%H:%M:%S')}-{day_stop.strftime('%Y-%m-%dT%H:%M:%S')}-{exclude_conditions}-{minsize}.parquet\"\n",
"#file_trades = dir + f\"trades_df-{symbol}-{day_start.strftime('%Y-%m-%dT%H:%M:%S')}-{day_stop.strftime('%Y-%m-%dT%H:%M:%S')}.parquet\"\n",
"file_ohlcv = dir + f\"ohlcv_df-{symbol}-{day_start.strftime('%Y-%m-%dT%H:%M:%S')}-{day_stop.strftime('%Y-%m-%dT%H:%M:%S')}-{exclude_conditions}-{minsize}.parquet\"\n",
"\n",
"#print all parquet files in the directory\n",
"import os\n",
"files = [f for f in os.listdir(dir) if f.endswith(\".parquet\")]\n",
"for f in files:\n",
"    print(f)"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"---------------------------------------------------------------------------",
"KeyboardInterrupt                         Traceback (most recent call last)",
"Cell In[2], line 1: trades_df = fetch_daily_stock_trades(symbol, day_start, day_stop, exclude_conditions=exclude_conditions, minsize=minsize, force_remote=False, max_retries=5, backoff_factor=1)",
"v2realbot/loader/aggregator_vectorized.py:200, in fetch_daily_stock_trades: tradesResponse = client.get_stock_trades(stockTradeRequest)",
"alpaca/data/historical/stock.py:144 (get_stock_trades) -> alpaca/common/rest.py -> requests/sessions.py -> urllib3/connectionpool.py -> http/client.py -> socket.py -> ssl.py (interrupted during a blocking socket read)",
"KeyboardInterrupt: "
]
}
],
"source": [
"trades_df = fetch_daily_stock_trades(symbol, day_start, day_stop, exclude_conditions=exclude_conditions, minsize=minsize, force_remote=False, max_retries=5, backoff_factor=1)\n",
"trades_df"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"#Either load trades or ohlcv from parquet if it exists\n",
"\n",
"#trades_df = fetch_trades_parallel(symbol, day_start, day_stop, exclude_conditions=exclude_conditions, minsize=50, max_workers=20) #exclude_conditions=['C','O','4','B','7','V','P','W','U','Z','F'])\n",
"# trades_df.to_parquet(file_trades, engine='pyarrow', compression='gzip')\n",
"\n",
"trades_df = pd.read_parquet(file_trades, engine='pyarrow')\n",
"ohlcv_df = aggregate_trades(symbol=symbol, trades_df=trades_df, resolution=1, type=BarType.TIME)\n",
"ohlcv_df.to_parquet(file_ohlcv, engine='pyarrow', compression='gzip')\n",
"\n",
"# ohlcv_df = pd.read_parquet(file_ohlcv, engine='pyarrow')\n",
"# trades_df = pd.read_parquet(file_trades, engine='pyarrow')\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#list all files in the dir directory with the parquet extension\n",
"dir = DATA_DIR + \"/notebooks/\"\n",
"import os\n",
"files = [f for f in os.listdir(dir) if f.endswith(\".parquet\")]\n",
"file_name = \"\"\n",
"ohlcv_df = pd.read_parquet(file_ohlcv, engine='pyarrow')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ohlcv_df"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"import seaborn as sns\n",
"# Calculate per-bar returns\n",
"ohlcv_df['returns'] = ohlcv_df['close'].pct_change()\n",
"#TODO same as above, but with pct_change from 3 datapoints back, and only within the same date, else NaN\n",
"\n",
"\n",
"# Plot the probability distribution curve\n",
"plt.figure(figsize=(10, 6))\n",
"sns.histplot(ohlcv_df['returns'].dropna(), kde=True, stat='probability', bins=30)\n",
"plt.title('Probability Distribution of Returns')\n",
"plt.xlabel('Returns')\n",
"plt.ylabel('Probability')\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import StandardScaler\n",
"from sklearn.linear_model import LogisticRegression\n",
"from sklearn.metrics import accuracy_score\n",
"\n",
"# Define the lookback intervals (5 to 20 bars); returns are computed for each interval\n",
"#maybe use a rolling window?\n",
"intervals = range(5, 21, 5)\n",
"\n",
"# Create columns for percentage returns over each interval\n",
"rolling_window = 50\n",
"\n",
"# Normalize the returns using rolling mean and std\n",
"for N in intervals:\n",
"    column_name = f'returns_{N}'\n",
"    ohlcv_df[column_name] = ohlcv_df['close'].pct_change(N)  # assumed N-bar return; the original cell referenced these columns without defining them\n",
"    rolling_mean = ohlcv_df[column_name].rolling(window=rolling_window).mean()\n",
"    rolling_std = ohlcv_df[column_name].rolling(window=rolling_window).std()\n",
"    ohlcv_df[f'norm_{column_name}'] = (ohlcv_df[column_name] - rolling_mean) / rolling_std\n",
"\n",
"# Display the dataframe with normalized return columns\n",
"ohlcv_df\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Calculate the sum of the normalized return columns for each row\n",
"ohlcv_df['sum_norm_returns'] = ohlcv_df[[f'norm_returns_{N}' for N in intervals]].sum(axis=1)\n",
"\n",
"# Sort the DataFrame based on the sum of normalized returns in descending order\n",
"df_sorted = ohlcv_df.sort_values(by='sum_norm_returns', ascending=False)\n",
"\n",
"# Display the top rows with the highest sum of normalized returns\n",
"df_sorted\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Drop initial rows with NaN values due to pct_change\n",
"ohlcv_df.dropna(inplace=True)\n",
"\n",
"# Plotting the probability distribution curves\n",
"plt.figure(figsize=(14, 8))\n",
"for N in intervals:\n",
"    sns.kdeplot(ohlcv_df[f'returns_{N}'].dropna(), label=f'Returns {N}', fill=True)\n",
"\n",
"plt.title('Probability Distribution of Percentage Returns')\n",
"plt.xlabel('Percentage Return')\n",
"plt.ylabel('Density')\n",
"plt.legend()\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"import seaborn as sns\n",
"# Plot the probability distribution curve\n",
"plt.figure(figsize=(10, 6))\n",
"sns.histplot(ohlcv_df['returns'].dropna(), kde=True, stat='probability', bins=30)\n",
"plt.title('Probability Distribution of Returns')\n",
"plt.xlabel('Returns')\n",
"plt.ylabel('Probability')\n",
"plt.show()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#show only rows from ohlcv_df where returns > 0.0005\n",
"ohlcv_df[ohlcv_df['returns'] > 0.0005]\n",
"\n",
"#ohlcv_df[ohlcv_df['returns'] < -0.005]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#ohlcv rows where the index date is 2024-03-13, between 12:00 and 13:00\n",
"\n",
"a = ohlcv_df.loc['2024-03-13 12:00:00':'2024-03-13 13:00:00']\n",
"a"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ohlcv_df"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"trades_df"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ohlcv_df.info()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"trades_df.to_parquet(\"trades_df-spy-0111-0111.parquet\", engine='pyarrow', compression='gzip')\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"trades_df.to_parquet(\"trades_df-spy-111-0516.parquet\", engine='pyarrow', compression='gzip', allow_truncated_timestamps=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ohlcv_df.to_parquet(\"ohlcv_df-spy-111-0516.parquet\", engine='pyarrow', compression='gzip')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"basic_data = vbt.Data.from_data(vbt.symbol_dict({symbol: ohlcv_df}), tz_convert=zoneNY)\n",
"vbt.settings['plotting']['auto_rangebreaks'] = True\n",
"basic_data.ohlcv.plot()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#access just BAC\n",
"#df_filtered = df.loc[\"BAC\"]"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
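The markdown cell at the top of this notebook describes a two-step workflow: fetch trades, then aggregate them into bars. As a quick orientation for readers of this diff, the sketch below strings those calls together the same way the notebook does. It is a minimal sketch, not canonical usage: the argument values (symbol, date range, condition list, minsize, worker count, output file names) are illustrative assumptions, while `fetch_trades_parallel`, `aggregate_trades`, `BarType`, and `zoneNY` are the repository's own names used in the cells above.

```python
# Minimal sketch of the fetch -> aggregate -> cache workflow from this notebook.
# The v2realbot imports are the ones the notebook itself uses; values are examples.
from datetime import datetime
from v2realbot.enums.enums import BarType
from v2realbot.utils.utils import zoneNY
from v2realbot.loader.aggregator_vectorized import fetch_trades_parallel, aggregate_trades

symbol = "SPY"
day_start = zoneNY.localize(datetime(2024, 1, 1, 9, 30, 0))
day_stop = zoneNY.localize(datetime(2024, 1, 14, 16, 0, 0))

# 1) Fetch raw trades (remote or cached), filtering out excluded trade
#    conditions and trades below a minimum size.
trades_df = fetch_trades_parallel(
    symbol, day_start, day_stop,
    exclude_conditions=['C', 'O', '4', 'B', '7', 'V', 'P', 'W', 'U', 'Z', 'F'],
    minsize=100, max_workers=20)

# 2) Aggregate trades into bars of the chosen type and resolution
#    (TIME, VOLUME, or DOLLAR).
ohlcv_df = aggregate_trades(symbol=symbol, trades_df=trades_df,
                            resolution=1, type=BarType.TIME)

# 3) Cache both frames as parquet so later runs can skip the remote fetch.
trades_df.to_parquet("trades_df-example.parquet", engine="pyarrow", compression="gzip")
ohlcv_df.to_parquet("ohlcv_df-example.parquet", engine="pyarrow", compression="gzip")
```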
1526 research/indcross_parametrized.ipynb (new file): file diff suppressed because one or more lines are too long
557 research/ohlc_persistance_test.ipynb (new file): file diff suppressed because one or more lines are too long
1602 research/prepare_aggregatied_data.ipynb (new file): file diff suppressed because it is too large
26673 research/rsi_alpaca.ipynb (new file): file diff suppressed because one or more lines are too long
1553 research/strat1/strat1_v1_MULTI.ipynb (new file): file diff suppressed because one or more lines are too long
1570 research/strat1/strat1_v1_SINGLE.ipynb (new file): file diff suppressed because one or more lines are too long
1553 research/strat_LINREG_MULTI/v1_MULTI.ipynb (new file): file diff suppressed because one or more lines are too long
44669 research/strat_LINREG_MULTI/v1_SINGLE.ipynb (new file): file diff suppressed because one or more lines are too long
1536 research/strat_ORDER_IMBALANCE/v1_MULTI.ipynb (new file): file diff suppressed because one or more lines are too long
1569572 research/strat_ORDER_IMBALANCE/v1_SINGLE.ipynb (new file): file diff suppressed because one or more lines are too long
1706 research/strat_ORDER_IMBALANCE/v2_SINGLE.ipynb (new file): file diff suppressed because one or more lines are too long
1536 research/strat_TIME_ENTRIES copy/v1_MULTI.ipynb (new file): file diff suppressed because one or more lines are too long
44779 research/strat_TIME_ENTRIES copy/v1_SINGLE.ipynb (new file): file diff suppressed because one or more lines are too long
105 research/test1.ipynb (new file): file diff suppressed because one or more lines are too long
421 research/test1sbars.ipynb (new file)
@@ -0,0 +1,421 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from v2realbot.tools.loadbatch import load_batch\n",
"from v2realbot.utils.utils import zoneNY\n",
"import pandas as pd\n",
"import numpy as np\n",
"import vectorbtpro as vbt\n",
"from itables import init_notebook_mode, show\n",
"\n",
"init_notebook_mode(all_interactive=True)\n",
"\n",
"vbt.settings.set_theme(\"dark\")\n",
"vbt.settings['plotting']['layout']['width'] = 1280\n",
"vbt.settings.plotting.auto_rangebreaks = True\n",
"# Set the option to display with pagination\n",
"pd.set_option('display.notebook_repr_html', True)\n",
"pd.set_option('display.max_rows', 10) # Number of rows per page\n",
"\n",
"res, df = load_batch(batch_id=\"0fb5043a\", #46 days, Mar 1 to May 6\n",
"                     space_resolution_evenly=False,\n",
"                     indicators_columns=[\"Rsi14\"],\n",
"                     main_session_only=True,\n",
"                     verbose=False)\n",
"if res < 0:\n",
"    print(\"Error \" + str(res) + str(df))\n",
"df = df[\"bars\"]\n",
"\n",
"df"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# filter dates"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#filter to specific days\n",
"# dates_of_interest = pd.to_datetime(['2024-04-22', '2024-04-23']).tz_localize('US/Eastern')\n",
"# filtered_df = df.loc[df.index.normalize().isin(dates_of_interest)]\n",
"\n",
"# df = filtered_df\n",
"# df.info()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import plotly.io as pio\n",
"pio.renderers.default = 'notebook'\n",
"\n",
"#load into vbt, with the symbol as the column\n",
"basic_data = vbt.Data.from_data({\"BAC\": df}, tz_convert=zoneNY)\n",
"start_date = pd.Timestamp('2024-03-12 09:30', tz=zoneNY)\n",
"end_date = pd.Timestamp('2024-03-13 16:00', tz=zoneNY)\n",
"\n",
"#basic_data = basic_data.transform(lambda df: df[df.index.date == start_date.date()])\n",
"#basic_data = basic_data.transform(lambda df: df[(df.index >= start_date) & (df.index <= end_date)])\n",
"#basic_data.data[\"BAC\"].info()\n",
"\n",
"# fig = basic_data.plot(plot_volume=False)\n",
"# pivot_info = basic_data.run(\"pivotinfo\", up_th=0.003, down_th=0.002)\n",
"# #pivot_info.plot()\n",
"# pivot_info.plot(fig=fig, conf_value_trace_kwargs=dict(visible=True))\n",
"# fig.show()\n",
"\n",
"\n",
"# rsi14 = basic_data.data[\"BAC\"][\"Rsi14\"].rename(\"Rsi14\")\n",
"\n",
"# rsi14.vbt.plot().show()\n",
"#basic_data.xloc[\"09:30\":\"10:00\"].data[\"BAC\"].vbt.ohlcv.plot().show()\n",
"\n",
"vbt.settings.plotting.auto_rangebreaks = True\n",
"#basic_data.data[\"BAC\"].vbt.ohlcv.plot()\n",
"\n",
"#basic_data.data[\"BAC\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"m1_data = basic_data[['Open', 'High', 'Low', 'Close', 'Volume']]\n",
"\n",
"m1_data.data[\"BAC\"]\n",
"#m5_data = m1_data.resample(\"5T\")\n",
"\n",
"#m5_data.data[\"BAC\"].head(10)\n",
"\n",
"# m15_data = m1_data.resample(\"15T\")\n",
"\n",
"# m15 = m15_data.data[\"BAC\"]\n",
"\n",
"# m15.vbt.ohlcv.plot()\n",
"\n",
"# m1_data.wrapper.index\n",
"\n",
"# m1_resampler = m1_data.wrapper.get_resampler(\"1T\")\n",
"# m1_resampler.index_difference(reverse=True)\n",
"\n",
"\n",
"# m5_resampler.prettify()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# defining ENTRY WINDOW and forced EXIT window"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#m1_data.data[\"BAC\"].info()\n",
"import datetime\n",
"# Define the market open and close times\n",
"market_open = datetime.time(9, 30)\n",
"market_close = datetime.time(16, 0)\n",
"entry_window_opens = 1\n",
"entry_window_closes = 350\n",
"\n",
"forced_exit_start = 380\n",
"forced_exit_end = 390\n",
"\n",
"forced_exit = m1_data.symbol_wrapper.fill(False)\n",
"entry_window_open = m1_data.symbol_wrapper.fill(False)\n",
"\n",
"# Calculate the time difference in minutes from market open for each timestamp\n",
"elapsed_min_from_open = (forced_exit.index.hour - market_open.hour) * 60 + (forced_exit.index.minute - market_open.minute)\n",
"\n",
"entry_window_open[(elapsed_min_from_open >= entry_window_opens) & (elapsed_min_from_open < entry_window_closes)] = True\n",
"forced_exit[(elapsed_min_from_open >= forced_exit_start) & (elapsed_min_from_open < forced_exit_end)] = True\n",
"\n",
"#entry_window_open.info()\n",
"# forced_exit.tail(100)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"close = m1_data.close\n",
"\n",
"rsi = vbt.RSI.run(close, window=14)\n",
"\n",
"long_entries = (rsi.rsi.vbt.crossed_below(20) & entry_window_open)\n",
"long_exits = (rsi.rsi.vbt.crossed_above(70) | forced_exit)\n",
"#long_entries.info()\n",
"#number of Trues and Falses in long_entries\n",
"long_entries.value_counts()\n",
"#long_exits.value_counts()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def plot_rsi(rsi, close, entries, exits):\n",
"    fig = vbt.make_subplots(rows=1, cols=1, shared_xaxes=True, specs=[[{\"secondary_y\": True}]], vertical_spacing=0.02, subplot_titles=(\"RSI\", \"Price\"))\n",
"    close.vbt.plot(fig=fig, add_trace_kwargs=dict(secondary_y=True))\n",
"    rsi.plot(fig=fig, add_trace_kwargs=dict(secondary_y=False))\n",
"    entries.vbt.signals.plot_as_entries(rsi.rsi, fig=fig, add_trace_kwargs=dict(secondary_y=False))\n",
"    exits.vbt.signals.plot_as_exits(rsi.rsi, fig=fig, add_trace_kwargs=dict(secondary_y=False))\n",
"    return fig\n",
"\n",
"plot_rsi(rsi, close, long_entries, long_exits)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vbt.phelp(vbt.Portfolio.from_signals)"
]
},
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sl_stop = np.arange(0.03/100, 0.2/100, 0.02/100).tolist()\n",
|
||||
"# Using the round function\n",
|
||||
"sl_stop = [round(val, 4) for val in sl_stop]\n",
|
||||
"print(sl_stop)\n",
|
||||
"sl_stop = vbt.Param(sl_stop) #np.nan mean s no stoploss\n",
|
||||
"\n",
|
||||
"pf = vbt.Portfolio.from_signals(close=close, entries=long_entries, sl_stop=sl_stop, tp_stop = sl_stop, exits=long_exits,fees=0.0167/100, freq=\"1s\") #sl_stop=sl_stop, tp_stop = sl_stop, \n",
|
||||
"\n",
|
||||
"#pf.stats()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pf.plot()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pf[(0.0015,0.0013)].plot()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pf[0.03].plot_trade_signals()\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# pristup k pf jako multi index"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#pf[0.03].plot()\n",
|
||||
"#pf.order_records\n",
|
||||
"pf[(0.03)].stats()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#zgrupovane statistiky\n",
|
||||
"stats_df = pf.stats([\n",
|
||||
" 'total_return',\n",
|
||||
" 'total_trades',\n",
|
||||
" 'win_rate',\n",
|
||||
" 'expectancy'\n",
|
||||
"], agg_func=None)\n",
|
||||
"stats_df\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"stats_df.nlargest(50, 'Total Return [%]')\n",
|
||||
"#stats_df.info()"
|
||||
]
|
||||
},
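{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Added sketch: pull the best (sl_stop, tp_stop) pair out of the grouped stats\n",
"# above; treating the stats index as the Param grid is an assumption.\n",
"best_params = stats_df['Total Return [%]'].idxmax()\n",
"print(best_params)\n",
"#pf[best_params].plot()"
]
},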
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pf[(0.0011,0.0013)].plot()\n",
|
||||
"\n",
|
||||
"#pf[(0.0011,0.0013000000000000002)].plot()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from pandas.tseries.offsets import DateOffset\n",
|
||||
"\n",
|
||||
"temp_data = basic_data['2024-4-22']\n",
|
||||
"temp_data\n",
|
||||
"res1m = temp_data[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n",
|
||||
"\n",
|
||||
"# Define a custom date offset that starts at 9:30 AM and spans 4 hours\n",
|
||||
"custom_offset = DateOffset(hours=4, minutes=30)\n",
|
||||
"\n",
|
||||
"# res1m = res1m.get().resample(\"4H\").agg({ \n",
|
||||
"# \"Open\": \"first\",\n",
|
||||
"# \"High\": \"max\",\n",
|
||||
"# \"Low\": \"min\",\n",
|
||||
"# \"Close\": \"last\",\n",
|
||||
"# \"Volume\": \"sum\"\n",
|
||||
"# })\n",
|
||||
"\n",
|
||||
"res4h = res1m.resample(\"1h\", resample_kwargs=dict(origin=\"start\"))\n",
|
||||
"\n",
|
||||
"res4h.data\n",
|
||||
"\n",
|
||||
"res15m = res1m.resample(\"15T\", resample_kwargs=dict(origin=\"start\"))\n",
|
||||
"\n",
|
||||
"res15m.data[\"BAC\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"@vbt.njit\n",
|
||||
"def long_entry_place_func_nb(c, low, close, time_in_ns, rsi14, window_open, window_close):\n",
|
||||
" market_open_minutes = 570 # 9 hours * 60 minutes + 30 minutes\n",
|
||||
"\n",
|
||||
" for out_i in range(len(c.out)):\n",
|
||||
" i = c.from_i + out_i\n",
|
||||
"\n",
|
||||
" current_minutes = vbt.dt_nb.hour_nb(time_in_ns[i]) * 60 + vbt.dt_nb.minute_nb(time_in_ns[i])\n",
|
||||
" #print(\"current_minutes\", current_minutes)\n",
|
||||
" # Calculate elapsed minutes since market open at 9:30 AM\n",
|
||||
" elapsed_from_open = current_minutes - market_open_minutes\n",
|
||||
" elapsed_from_open = elapsed_from_open if elapsed_from_open >= 0 else 0\n",
|
||||
" #print( \"elapsed_from_open\", elapsed_from_open)\n",
|
||||
"\n",
|
||||
" #elapsed_from_open = elapsed_minutes_from_open_nb(time_in_ns) \n",
|
||||
" in_window = elapsed_from_open > window_open and elapsed_from_open < window_close\n",
|
||||
" #print(\"in_window\", in_window)\n",
|
||||
" # if in_window:\n",
|
||||
" # print(\"in window\")\n",
|
||||
"\n",
|
||||
" if in_window and rsi14[i] > 60: # and low[i, c.col] <= hit_price: # and hour == 9: # (4)!\n",
|
||||
" return out_i\n",
|
||||
" return -1\n",
|
||||
"\n",
|
||||
"@vbt.njit\n",
|
||||
"def long_exit_place_func_nb(c, high, close, time_index, tp, sl): # (5)!\n",
|
||||
" entry_i = c.from_i - c.wait\n",
|
||||
" entry_price = close[entry_i, c.col]\n",
|
||||
" hit_price = entry_price * (1 + tp)\n",
|
||||
" stop_price = entry_price * (1 - sl)\n",
|
||||
" for out_i in range(len(c.out)):\n",
|
||||
" i = c.from_i + out_i\n",
|
||||
" last_bar_of_day = vbt.dt_nb.day_changed_nb(time_index[i], time_index[i + 1])\n",
|
||||
"\n",
|
||||
" #print(next_day)\n",
|
||||
" if last_bar_of_day: #pokud je dalsi next day, tak zavirame posledni\n",
|
||||
" print(\"ted\",out_i)\n",
|
||||
" return out_i\n",
|
||||
" if close[i, c.col] >= hit_price or close[i, c.col] <= stop_price :\n",
|
||||
" return out_i\n",
|
||||
" return -1\n",
|
||||
"\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"df = pd.DataFrame(np.random.random(size=(5, 10)), columns=list('abcdefghij'))\n",
|
||||
"\n",
|
||||
"df"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"df.sum()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
1639
research/test1sbars_roc.ipynb
Normal file
File diff suppressed because one or more lines are too long
@ -1,620 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import pandas as pd\n",
|
||||
"import pyarrow\n",
|
||||
"import numpy as np\n",
|
||||
"from numba import jit\n",
|
||||
"import v2realbot.utils.config_handler as cfh"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Další info k pokračování je zde https://blog.quantinsti.com/tick-tick-ohlc-data-pandas-tutorial/"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 38,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<class 'pandas.core.frame.DataFrame'>\n",
|
||||
"DatetimeIndex: 190261 entries, 2024-04-22 13:30:00.267711+00:00 to 2024-04-22 19:59:59.987614+00:00\n",
|
||||
"Data columns (total 6 columns):\n",
|
||||
" # Column Non-Null Count Dtype \n",
|
||||
"--- ------ -------------- ----- \n",
|
||||
" 0 exchange 190261 non-null object \n",
|
||||
" 1 price 190261 non-null float64\n",
|
||||
" 2 size 190261 non-null float64\n",
|
||||
" 3 id 190261 non-null int64 \n",
|
||||
" 4 conditions 190261 non-null object \n",
|
||||
" 5 tape 190261 non-null object \n",
|
||||
"dtypes: float64(2), int64(1), object(3)\n",
|
||||
"memory usage: 10.2+ MB\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<div>\n",
|
||||
"<style scoped>\n",
|
||||
" .dataframe tbody tr th:only-of-type {\n",
|
||||
" vertical-align: middle;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe tbody tr th {\n",
|
||||
" vertical-align: top;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe thead th {\n",
|
||||
" text-align: right;\n",
|
||||
" }\n",
|
||||
"</style>\n",
|
||||
"<table border=\"1\" class=\"dataframe\">\n",
|
||||
" <thead>\n",
|
||||
" <tr style=\"text-align: right;\">\n",
|
||||
" <th></th>\n",
|
||||
" <th>exchange</th>\n",
|
||||
" <th>price</th>\n",
|
||||
" <th>size</th>\n",
|
||||
" <th>id</th>\n",
|
||||
" <th>conditions</th>\n",
|
||||
" <th>tape</th>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>timestamp</th>\n",
|
||||
" <th></th>\n",
|
||||
" <th></th>\n",
|
||||
" <th></th>\n",
|
||||
" <th></th>\n",
|
||||
" <th></th>\n",
|
||||
" <th></th>\n",
|
||||
" </tr>\n",
|
||||
" </thead>\n",
|
||||
" <tbody>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 13:30:00.267711+00:00</th>\n",
|
||||
" <td>K</td>\n",
|
||||
" <td>36.890</td>\n",
|
||||
" <td>5.0</td>\n",
|
||||
" <td>52983525037630</td>\n",
|
||||
" <td>[ , F, I]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 13:30:00.300501+00:00</th>\n",
|
||||
" <td>D</td>\n",
|
||||
" <td>37.005</td>\n",
|
||||
" <td>1.0</td>\n",
|
||||
" <td>71675241117014</td>\n",
|
||||
" <td>[ , I]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 13:30:00.305439+00:00</th>\n",
|
||||
" <td>D</td>\n",
|
||||
" <td>37.005</td>\n",
|
||||
" <td>1.0</td>\n",
|
||||
" <td>71675241117496</td>\n",
|
||||
" <td>[ , I]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 13:30:00.314520+00:00</th>\n",
|
||||
" <td>D</td>\n",
|
||||
" <td>37.005</td>\n",
|
||||
" <td>1.0</td>\n",
|
||||
" <td>71675241118034</td>\n",
|
||||
" <td>[ , I]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 13:30:00.335201+00:00</th>\n",
|
||||
" <td>D</td>\n",
|
||||
" <td>37.005</td>\n",
|
||||
" <td>1.0</td>\n",
|
||||
" <td>71675241121369</td>\n",
|
||||
" <td>[ , I]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>...</th>\n",
|
||||
" <td>...</td>\n",
|
||||
" <td>...</td>\n",
|
||||
" <td>...</td>\n",
|
||||
" <td>...</td>\n",
|
||||
" <td>...</td>\n",
|
||||
" <td>...</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 19:59:59.902614+00:00</th>\n",
|
||||
" <td>V</td>\n",
|
||||
" <td>37.750</td>\n",
|
||||
" <td>1100.0</td>\n",
|
||||
" <td>56480705310575</td>\n",
|
||||
" <td>[ ]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 19:59:59.977134+00:00</th>\n",
|
||||
" <td>N</td>\n",
|
||||
" <td>37.745</td>\n",
|
||||
" <td>300.0</td>\n",
|
||||
" <td>52983559963478</td>\n",
|
||||
" <td>[ ]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 19:59:59.977137+00:00</th>\n",
|
||||
" <td>N</td>\n",
|
||||
" <td>37.740</td>\n",
|
||||
" <td>7300.0</td>\n",
|
||||
" <td>52983559963696</td>\n",
|
||||
" <td>[ ]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 19:59:59.978626+00:00</th>\n",
|
||||
" <td>V</td>\n",
|
||||
" <td>37.750</td>\n",
|
||||
" <td>16.0</td>\n",
|
||||
" <td>56480706886228</td>\n",
|
||||
" <td>[ , I]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 19:59:59.987614+00:00</th>\n",
|
||||
" <td>N</td>\n",
|
||||
" <td>37.745</td>\n",
|
||||
" <td>30.0</td>\n",
|
||||
" <td>52983559963958</td>\n",
|
||||
" <td>[ , I]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
"<p>190261 rows × 6 columns</p>\n",
|
||||
"</div>"
|
||||
],
|
||||
"text/plain": [
|
||||
" exchange price size id \\\n",
|
||||
"timestamp \n",
|
||||
"2024-04-22 13:30:00.267711+00:00 K 36.890 5.0 52983525037630 \n",
|
||||
"2024-04-22 13:30:00.300501+00:00 D 37.005 1.0 71675241117014 \n",
|
||||
"2024-04-22 13:30:00.305439+00:00 D 37.005 1.0 71675241117496 \n",
|
||||
"2024-04-22 13:30:00.314520+00:00 D 37.005 1.0 71675241118034 \n",
|
||||
"2024-04-22 13:30:00.335201+00:00 D 37.005 1.0 71675241121369 \n",
|
||||
"... ... ... ... ... \n",
|
||||
"2024-04-22 19:59:59.902614+00:00 V 37.750 1100.0 56480705310575 \n",
|
||||
"2024-04-22 19:59:59.977134+00:00 N 37.745 300.0 52983559963478 \n",
|
||||
"2024-04-22 19:59:59.977137+00:00 N 37.740 7300.0 52983559963696 \n",
|
||||
"2024-04-22 19:59:59.978626+00:00 V 37.750 16.0 56480706886228 \n",
|
||||
"2024-04-22 19:59:59.987614+00:00 N 37.745 30.0 52983559963958 \n",
|
||||
"\n",
|
||||
" conditions tape \n",
|
||||
"timestamp \n",
|
||||
"2024-04-22 13:30:00.267711+00:00 [ , F, I] A \n",
|
||||
"2024-04-22 13:30:00.300501+00:00 [ , I] A \n",
|
||||
"2024-04-22 13:30:00.305439+00:00 [ , I] A \n",
|
||||
"2024-04-22 13:30:00.314520+00:00 [ , I] A \n",
|
||||
"2024-04-22 13:30:00.335201+00:00 [ , I] A \n",
|
||||
"... ... ... \n",
|
||||
"2024-04-22 19:59:59.902614+00:00 [ ] A \n",
|
||||
"2024-04-22 19:59:59.977134+00:00 [ ] A \n",
|
||||
"2024-04-22 19:59:59.977137+00:00 [ ] A \n",
|
||||
"2024-04-22 19:59:59.978626+00:00 [ , I] A \n",
|
||||
"2024-04-22 19:59:59.987614+00:00 [ , I] A \n",
|
||||
"\n",
|
||||
"[190261 rows x 6 columns]"
|
||||
]
|
||||
},
|
||||
"execution_count": 38,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tdf=pd.read_parquet('trades_bac.parquet',engine='pyarrow')\n",
|
||||
"#print(df)\n",
|
||||
"df = tdf.loc['BAC']\n",
|
||||
"df.info()\n",
|
||||
"df"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"@jit(nopython=True)\n",
|
||||
"def ohlcv_bars(ticks, start_time, end_time, resolution):\n",
|
||||
" \"\"\"\n",
|
||||
" Generate OHLCV bars from tick data, skipping intervals without trading activity.\n",
|
||||
" \n",
|
||||
" Parameters:\n",
|
||||
" - ticks: numpy array with columns [timestamp, price, size]\n",
|
||||
" - start_time: the start timestamp for bars (Unix timestamp)\n",
|
||||
" - end_time: the end timestamp for bars (Unix timestamp)\n",
|
||||
" - resolution: time resolution in seconds\n",
|
||||
" \n",
|
||||
" Returns:\n",
|
||||
" - OHLCV bars as a numpy array\n",
|
||||
" \"\"\"\n",
|
||||
" num_bars = (end_time - start_time) // resolution + 1\n",
|
||||
" bar_list = []\n",
|
||||
"\n",
|
||||
" for i in range(num_bars):\n",
|
||||
" bar_start_time = start_time + i * resolution\n",
|
||||
" bar_end_time = bar_start_time + resolution\n",
|
||||
" bar_ticks = ticks[(ticks[:, 0] >= bar_start_time) & (ticks[:, 0] < bar_end_time)]\n",
|
||||
" \n",
|
||||
" if bar_ticks.shape[0] == 0:\n",
|
||||
" continue # Skip this bar as there are no ticks\n",
|
||||
"\n",
|
||||
" # Calculate OHLCV values\n",
|
||||
" open_price = bar_ticks[0, 1] # open\n",
|
||||
" high_price = np.max(bar_ticks[:, 1]) # high\n",
|
||||
" low_price = np.min(bar_ticks[:, 1]) # low\n",
|
||||
" close_price = bar_ticks[-1, 1] # close\n",
|
||||
" volume = np.sum(bar_ticks[:, 2]) # volume\n",
|
||||
" bar_time = bar_start_time # timestamp for the bar\n",
|
||||
"\n",
|
||||
" bar_list.append([open_price, high_price, low_price, close_price, volume, bar_time])\n",
|
||||
"\n",
|
||||
" # Convert list to numpy array\n",
|
||||
" if bar_list:\n",
|
||||
" ohlcv = np.array(bar_list)\n",
|
||||
" else:\n",
|
||||
" ohlcv = np.empty((0, 6)) # return an empty array if no bars were created\n",
|
||||
"\n",
|
||||
" return ohlcv\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 40,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<class 'pandas.core.frame.DataFrame'>\n",
|
||||
"DatetimeIndex: 190261 entries, 2024-04-22 13:30:00.267711+00:00 to 2024-04-22 19:59:59.987614+00:00\n",
|
||||
"Data columns (total 6 columns):\n",
|
||||
" # Column Non-Null Count Dtype \n",
|
||||
"--- ------ -------------- ----- \n",
|
||||
" 0 exchange 190261 non-null object \n",
|
||||
" 1 price 190261 non-null float64\n",
|
||||
" 2 size 190261 non-null float64\n",
|
||||
" 3 id 190261 non-null int64 \n",
|
||||
" 4 conditions 190261 non-null object \n",
|
||||
" 5 tape 190261 non-null object \n",
|
||||
"dtypes: float64(2), int64(1), object(3)\n",
|
||||
"memory usage: 10.2+ MB\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"df.info()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 41,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"['C', 'O', '4', 'B', '7', 'V', 'P', 'W', 'U', 'Z', 'F']\n",
|
||||
"<class 'pandas.core.frame.DataFrame'>\n",
|
||||
"DatetimeIndex: 143751 entries, 2024-04-22 13:30:00.300501+00:00 to 2024-04-22 19:59:59.987614+00:00\n",
|
||||
"Data columns (total 6 columns):\n",
|
||||
" # Column Non-Null Count Dtype \n",
|
||||
"--- ------ -------------- ----- \n",
|
||||
" 0 exchange 143751 non-null object \n",
|
||||
" 1 price 143751 non-null float64\n",
|
||||
" 2 size 143751 non-null float64\n",
|
||||
" 3 id 143751 non-null int64 \n",
|
||||
" 4 conditions 143751 non-null object \n",
|
||||
" 5 tape 143751 non-null object \n",
|
||||
"dtypes: float64(2), int64(1), object(3)\n",
|
||||
"memory usage: 7.7+ MB\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<div>\n",
|
||||
"<style scoped>\n",
|
||||
" .dataframe tbody tr th:only-of-type {\n",
|
||||
" vertical-align: middle;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe tbody tr th {\n",
|
||||
" vertical-align: top;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe thead th {\n",
|
||||
" text-align: right;\n",
|
||||
" }\n",
|
||||
"</style>\n",
|
||||
"<table border=\"1\" class=\"dataframe\">\n",
|
||||
" <thead>\n",
|
||||
" <tr style=\"text-align: right;\">\n",
|
||||
" <th></th>\n",
|
||||
" <th>exchange</th>\n",
|
||||
" <th>price</th>\n",
|
||||
" <th>size</th>\n",
|
||||
" <th>id</th>\n",
|
||||
" <th>conditions</th>\n",
|
||||
" <th>tape</th>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>timestamp</th>\n",
|
||||
" <th></th>\n",
|
||||
" <th></th>\n",
|
||||
" <th></th>\n",
|
||||
" <th></th>\n",
|
||||
" <th></th>\n",
|
||||
" <th></th>\n",
|
||||
" </tr>\n",
|
||||
" </thead>\n",
|
||||
" <tbody>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 13:30:00.300501+00:00</th>\n",
|
||||
" <td>D</td>\n",
|
||||
" <td>37.005</td>\n",
|
||||
" <td>1.0</td>\n",
|
||||
" <td>71675241117014</td>\n",
|
||||
" <td>[ , I]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 13:30:00.305439+00:00</th>\n",
|
||||
" <td>D</td>\n",
|
||||
" <td>37.005</td>\n",
|
||||
" <td>1.0</td>\n",
|
||||
" <td>71675241117496</td>\n",
|
||||
" <td>[ , I]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 13:30:00.314520+00:00</th>\n",
|
||||
" <td>D</td>\n",
|
||||
" <td>37.005</td>\n",
|
||||
" <td>1.0</td>\n",
|
||||
" <td>71675241118034</td>\n",
|
||||
" <td>[ , I]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 13:30:00.335201+00:00</th>\n",
|
||||
" <td>D</td>\n",
|
||||
" <td>37.005</td>\n",
|
||||
" <td>1.0</td>\n",
|
||||
" <td>71675241121369</td>\n",
|
||||
" <td>[ , I]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 13:30:00.346219+00:00</th>\n",
|
||||
" <td>D</td>\n",
|
||||
" <td>37.005</td>\n",
|
||||
" <td>1.0</td>\n",
|
||||
" <td>71675241122389</td>\n",
|
||||
" <td>[ , I]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>...</th>\n",
|
||||
" <td>...</td>\n",
|
||||
" <td>...</td>\n",
|
||||
" <td>...</td>\n",
|
||||
" <td>...</td>\n",
|
||||
" <td>...</td>\n",
|
||||
" <td>...</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 19:59:59.902614+00:00</th>\n",
|
||||
" <td>V</td>\n",
|
||||
" <td>37.750</td>\n",
|
||||
" <td>1100.0</td>\n",
|
||||
" <td>56480705310575</td>\n",
|
||||
" <td>[ ]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 19:59:59.977134+00:00</th>\n",
|
||||
" <td>N</td>\n",
|
||||
" <td>37.745</td>\n",
|
||||
" <td>300.0</td>\n",
|
||||
" <td>52983559963478</td>\n",
|
||||
" <td>[ ]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 19:59:59.977137+00:00</th>\n",
|
||||
" <td>N</td>\n",
|
||||
" <td>37.740</td>\n",
|
||||
" <td>7300.0</td>\n",
|
||||
" <td>52983559963696</td>\n",
|
||||
" <td>[ ]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 19:59:59.978626+00:00</th>\n",
|
||||
" <td>V</td>\n",
|
||||
" <td>37.750</td>\n",
|
||||
" <td>16.0</td>\n",
|
||||
" <td>56480706886228</td>\n",
|
||||
" <td>[ , I]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2024-04-22 19:59:59.987614+00:00</th>\n",
|
||||
" <td>N</td>\n",
|
||||
" <td>37.745</td>\n",
|
||||
" <td>30.0</td>\n",
|
||||
" <td>52983559963958</td>\n",
|
||||
" <td>[ , I]</td>\n",
|
||||
" <td>A</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
"<p>143751 rows × 6 columns</p>\n",
|
||||
"</div>"
|
||||
],
|
||||
"text/plain": [
|
||||
" exchange price size id \\\n",
|
||||
"timestamp \n",
|
||||
"2024-04-22 13:30:00.300501+00:00 D 37.005 1.0 71675241117014 \n",
|
||||
"2024-04-22 13:30:00.305439+00:00 D 37.005 1.0 71675241117496 \n",
|
||||
"2024-04-22 13:30:00.314520+00:00 D 37.005 1.0 71675241118034 \n",
|
||||
"2024-04-22 13:30:00.335201+00:00 D 37.005 1.0 71675241121369 \n",
|
||||
"2024-04-22 13:30:00.346219+00:00 D 37.005 1.0 71675241122389 \n",
|
||||
"... ... ... ... ... \n",
|
||||
"2024-04-22 19:59:59.902614+00:00 V 37.750 1100.0 56480705310575 \n",
|
||||
"2024-04-22 19:59:59.977134+00:00 N 37.745 300.0 52983559963478 \n",
|
||||
"2024-04-22 19:59:59.977137+00:00 N 37.740 7300.0 52983559963696 \n",
|
||||
"2024-04-22 19:59:59.978626+00:00 V 37.750 16.0 56480706886228 \n",
|
||||
"2024-04-22 19:59:59.987614+00:00 N 37.745 30.0 52983559963958 \n",
|
||||
"\n",
|
||||
" conditions tape \n",
|
||||
"timestamp \n",
|
||||
"2024-04-22 13:30:00.300501+00:00 [ , I] A \n",
|
||||
"2024-04-22 13:30:00.305439+00:00 [ , I] A \n",
|
||||
"2024-04-22 13:30:00.314520+00:00 [ , I] A \n",
|
||||
"2024-04-22 13:30:00.335201+00:00 [ , I] A \n",
|
||||
"2024-04-22 13:30:00.346219+00:00 [ , I] A \n",
|
||||
"... ... ... \n",
|
||||
"2024-04-22 19:59:59.902614+00:00 [ ] A \n",
|
||||
"2024-04-22 19:59:59.977134+00:00 [ ] A \n",
|
||||
"2024-04-22 19:59:59.977137+00:00 [ ] A \n",
|
||||
"2024-04-22 19:59:59.978626+00:00 [ , I] A \n",
|
||||
"2024-04-22 19:59:59.987614+00:00 [ , I] A \n",
|
||||
"\n",
|
||||
"[143751 rows x 6 columns]"
|
||||
]
|
||||
},
|
||||
"execution_count": 41,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"excludes = cfh.config_handler.get_val('AGG_EXCLUDED_TRADES')\n",
|
||||
"print(excludes)\n",
|
||||
"#excludes = [\"F\", \"I\"]\n",
|
||||
"# FILTER EXCLUDED TRADES\n",
|
||||
"# Filter rows to exclude those where 'conditions' contains 'F' or 'I'\n",
|
||||
"# This simplifies the logic by directly using ~ (bitwise not operator) with np.isin\n",
|
||||
"df = df[~df['conditions'].apply(lambda x: np.isin(x, excludes).any())]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 46,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/var/folders/8p/dwqnp65s0s77jdbm4_6z4vp80000gn/T/ipykernel_52602/3341929382.py:2: DeprecationWarning: parsing timezone aware datetimes is deprecated; this will raise an error in the future\n",
|
||||
" structured_array = np.array(list(zip(df.index, df['price'], df['size'])),\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[('2024-04-22T13:30:00.300501000', 37.005, 1.0e+00)\n",
|
||||
" ('2024-04-22T13:30:00.305439000', 37.005, 1.0e+00)\n",
|
||||
" ('2024-04-22T13:30:00.314520000', 37.005, 1.0e+00) ...\n",
|
||||
" ('2024-04-22T19:59:59.977137000', 37.74 , 7.3e+03)\n",
|
||||
" ('2024-04-22T19:59:59.978626000', 37.75 , 1.6e+01)\n",
|
||||
" ('2024-04-22T19:59:59.987614000', 37.745, 3.0e+01)]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"array([('2024-04-22T13:30:00.300501000', 37.005, 1.0e+00),\n",
|
||||
" ('2024-04-22T13:30:00.305439000', 37.005, 1.0e+00),\n",
|
||||
" ('2024-04-22T13:30:00.314520000', 37.005, 1.0e+00), ...,\n",
|
||||
" ('2024-04-22T19:59:59.977137000', 37.74 , 7.3e+03),\n",
|
||||
" ('2024-04-22T19:59:59.978626000', 37.75 , 1.6e+01),\n",
|
||||
" ('2024-04-22T19:59:59.987614000', 37.745, 3.0e+01)],\n",
|
||||
" dtype=[('timestamp', '<M8[ns]'), ('price', '<f8'), ('size', '<f8')])"
|
||||
]
|
||||
},
|
||||
"execution_count": 46,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Creating a structured array with the timestamp as the first element\n",
|
||||
"structured_array = np.array(list(zip(df.index, df['price'], df['size'])),\n",
|
||||
" dtype=[('timestamp', 'datetime64[ns]'), ('price', 'float'), ('size', 'float')])\n",
|
||||
"\n",
|
||||
"print(structured_array)\n",
|
||||
"structured_array\n",
|
||||
"\n",
|
||||
"# ticks = df[['index', 'price', 'size']].to_numpy()\n",
|
||||
"# # ticks[:, 0] = pd.to_datetime(ticks[:, 0]).astype('int64') // 1_000_000_000 # \n",
|
||||
"# ticks"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"resolution_seconds = 1 # 1 second resolution\n",
|
||||
"ohlcv_data = ohlcv_bars(structured_array, resolution_seconds)\n",
|
||||
"\n",
|
||||
"# Converting the result back to DataFrame for better usability\n",
|
||||
"ohlcv_df = pd.DataFrame(ohlcv_data, columns=['Open', 'High', 'Low', 'Close', 'Volume', 'Time'])\n",
|
||||
"ohlcv_df['Time'] = pd.to_datetime(ohlcv_df['Time'], unit='s') # Convert timestamps back to datetime\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@ -1,7 +1,9 @@
|
||||
import os,sys
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
print(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
from alpaca.data.historical import CryptoHistoricalDataClient, StockHistoricalDataClient
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from alpaca.data.historical import StockHistoricalDataClient
|
||||
from alpaca.data.requests import CryptoLatestTradeRequest, StockLatestTradeRequest, StockLatestBarRequest, StockTradesRequest
|
||||
from alpaca.data.enums import DataFeed
|
||||
from v2realbot.config import ACCOUNT1_PAPER_API_KEY, ACCOUNT1_PAPER_SECRET_KEY
|
||||
|
||||
66
testy/vectorbt/testHtml2MD.py
Normal file
@ -0,0 +1,66 @@
|
||||
import os
|
||||
from bs4 import BeautifulSoup
|
||||
import html2text
|
||||
|
||||
def convert_html_to_markdown(html_content, link_mapping):
|
||||
h = html2text.HTML2Text()
|
||||
h.ignore_links = False
|
||||
|
||||
# Update internal links to point to the relevant sections in the Markdown
|
||||
soup = BeautifulSoup(html_content, 'html.parser')
|
||||
for a in soup.find_all('a', href=True):
|
||||
href = a['href']
|
||||
if href in link_mapping:
|
||||
a['href'] = f"#{link_mapping[href]}"
|
||||
|
||||
return h.handle(str(soup))
|
||||
|
||||
def create_link_mapping(root_dir):
|
||||
link_mapping = {}
|
||||
for subdir, _, files in os.walk(root_dir):
|
||||
for file in files:
|
||||
if file == "index.html":
|
||||
relative_path = os.path.relpath(os.path.join(subdir, file), root_dir)
|
||||
chapter_id = relative_path.replace(os.sep, '-').replace('index.html', '')
|
||||
link_mapping[relative_path] = chapter_id
|
||||
link_mapping[relative_path.replace(os.sep, '/')] = chapter_id # for URLs with slashes
|
||||
return link_mapping
|
||||
|
||||
def read_html_files(root_dir, link_mapping):
|
||||
markdown_content = []
|
||||
|
||||
for subdir, _, files in os.walk(root_dir):
|
||||
relative_path = os.path.relpath(subdir, root_dir)
|
||||
if files and any(file == "index.html" for file in files):
|
||||
# Add directory as a heading based on its depth
|
||||
heading_level = relative_path.count(os.sep) + 1
|
||||
markdown_content.append(f"{'#' * heading_level} {relative_path}\n")
|
||||
|
||||
for file in files:
|
||||
if file == "index.html":
|
||||
file_path = os.path.join(subdir, file)
|
||||
with open(file_path, 'r', encoding='utf-8') as f:
|
||||
html_content = f.read()
|
||||
soup = BeautifulSoup(html_content, 'html.parser')
|
||||
title = soup.title.string if soup.title else "No Title"
|
||||
chapter_id = os.path.relpath(file_path, root_dir).replace(os.sep, '-').replace('index.html', '')
|
||||
markdown_content.append(f"<a id='{chapter_id}'></a>\n")
|
||||
markdown_content.append(f"{'#' * (heading_level + 1)} {title}\n")
|
||||
markdown_content.append(convert_html_to_markdown(html_content, link_mapping))
|
||||
|
||||
return "\n".join(markdown_content)
|
||||
|
||||
def save_to_markdown_file(content, output_file):
|
||||
with open(output_file, 'w', encoding='utf-8') as f:
|
||||
f.write(content)
|
||||
|
||||
def main():
|
||||
root_dir = "./v2realbot/static/js/vbt/"
|
||||
output_file = "output.md"
|
||||
link_mapping = create_link_mapping(root_dir)
|
||||
markdown_content = read_html_files(root_dir, link_mapping)
|
||||
save_to_markdown_file(markdown_content, output_file)
|
||||
print(f"Markdown document created at {output_file}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@ -4,6 +4,7 @@ from appdirs import user_data_dir
|
||||
from pathlib import Path
|
||||
import os
|
||||
from collections import defaultdict
|
||||
from dotenv import load_dotenv
|
||||
# Global flag to track if the ml module has been imported (solution for long import times of tensorflow)
|
||||
#the first occurence of using it will load it globally
|
||||
_ml_module_loaded = False
|
||||
@ -16,6 +17,9 @@ RUNNER_DETAIL_DIRECTORY = Path(__file__).parent.parent.parent / "runner_detail"
|
||||
LOG_PATH = Path(__file__).parent.parent
|
||||
LOG_FILE = Path(__file__).parent.parent / "strat.log"
|
||||
JOB_LOG_FILE = Path(__file__).parent.parent / "job.log"
|
||||
DOTENV_DIRECTORY = Path(__file__).parent.parent.parent
|
||||
ENV_FILE = DOTENV_DIRECTORY / '.env'
|
||||
|
||||
|
||||
#stratvars that cannot be changed in gui
|
||||
STRATVARS_UNCHANGEABLES = ['pendingbuys', 'blockbuy', 'jevylozeno', 'limitka']
|
||||
@ -26,6 +30,12 @@ MODEL_DIR = Path(DATA_DIR)/"models"
|
||||
PROFILING_NEXT_ENABLED = False
|
||||
PROFILING_OUTPUT_DIR = DATA_DIR
|
||||
|
||||
#LOAD DOTENV ENV VARIABLES
|
||||
if load_dotenv(ENV_FILE, verbose=True) is False:
|
||||
print(f"Error loading.env file {ENV_FILE}. Now depending on ENV VARIABLES set externally.")
|
||||
else:
|
||||
print(f"Loaded env variables from file {ENV_FILE}")
|
||||
|
||||
#WIP - FILL CONFIGURATION CLASS FOR BACKTESTING
|
||||
class BT_FILL_CONF:
|
||||
""""
|
||||
@ -68,7 +78,7 @@ def get_key(mode: Mode, account: Account):
|
||||
#strategy instance main loop heartbeat
|
||||
HEARTBEAT_TIMEOUT=5
|
||||
|
||||
WEB_API_KEY="david"
|
||||
WEB_API_KEY=os.environ.get('WEB_API_KEY')
|
||||
|
||||
#PRIMARY PAPER
|
||||
ACCOUNT1_PAPER_API_KEY = os.environ.get('ACCOUNT1_PAPER_API_KEY')
|
||||
|
||||
@ -1,6 +1,11 @@
|
||||
from enum import Enum
|
||||
from alpaca.trading.enums import OrderSide, OrderStatus, OrderType
|
||||
|
||||
class BarType(str, Enum):
|
||||
TIME = "time"
|
||||
VOLUME = "volume"
|
||||
DOLLAR = "dollar"
|
||||
|
||||
class Env(str, Enum):
|
||||
PROD = "prod"
|
||||
TEST = "test"
|
||||
|
||||
1411
v2realbot/loader/agg_vect.ipynb
Normal file
File diff suppressed because it is too large
@ -2,121 +2,569 @@ import pandas as pd
|
||||
import numpy as np
|
||||
from numba import jit
|
||||
from alpaca.data.historical import StockHistoricalDataClient
|
||||
from sqlalchemy import column
|
||||
from v2realbot.config import ACCOUNT1_PAPER_API_KEY, ACCOUNT1_PAPER_SECRET_KEY, DATA_DIR
|
||||
from alpaca.data.requests import StockTradesRequest
|
||||
import time
|
||||
from datetime import datetime
|
||||
import time as time_module
|
||||
from v2realbot.utils.utils import parse_alpaca_timestamp, ltp, zoneNY, send_to_telegram, fetch_calendar_data
|
||||
import pyarrow
|
||||
|
||||
from traceback import format_exc
|
||||
from datetime import timedelta, datetime, time
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
import os
|
||||
import gzip
|
||||
import pickle
|
||||
import random
|
||||
from alpaca.data.models import BarSet, QuoteSet, TradeSet
|
||||
import v2realbot.utils.config_handler as cfh
|
||||
from v2realbot.enums.enums import BarType
|
||||
from tqdm import tqdm
|
||||
""""
|
||||
WIP - for later use
|
||||
Module used for vectorized aggregation of trades.
|
||||
|
||||
Includes fetch (remote/cached) methods and numba aggregator function for TIME BASED, VOLUME BASED and DOLLAR BARS
|
||||
|
||||
"""""
|
||||
|
||||
def fetch_stock_trades(symbol, start, end, max_retries=5, backoff_factor=1):
|
||||
"""
|
||||
Attempts to fetch stock trades with exponential backoff. Raises an exception if all retries fail.
|
||||
def aggregate_trades(symbol: str, trades_df: pd.DataFrame, resolution: int, type: BarType = BarType.TIME):
"""
Accepts a dataframe with trades keyed by symbol. Prepares the dataframe as
numpy and calls the Numba optimized aggregator for the given bar type (time/volume/dollar).
"""
|
||||
trades_df = trades_df.loc[symbol]
|
||||
trades_df= trades_df.reset_index()
|
||||
ticks = trades_df[['timestamp', 'price', 'size']].to_numpy()
|
||||
# Extract the timestamps column (assuming it's the first column)
|
||||
timestamps = ticks[:, 0]
|
||||
# Convert the timestamps to Unix timestamps in seconds with microsecond precision
|
||||
unix_timestamps_s = np.array([ts.timestamp() for ts in timestamps], dtype='float64')
|
||||
# Replace the original timestamps in the NumPy array with the converted Unix timestamps
|
||||
ticks[:, 0] = unix_timestamps_s
|
||||
ticks = ticks.astype(np.float64)
|
||||
#based on type, specific aggregator function is called
|
||||
match type:
|
||||
case BarType.TIME:
|
||||
ohlcv_bars = generate_time_bars_nb(ticks, resolution)
|
||||
case BarType.VOLUME:
|
||||
ohlcv_bars = generate_volume_bars_nb(ticks, resolution)
|
||||
case BarType.DOLLAR:
|
||||
ohlcv_bars = generate_dollar_bars_nb(ticks, resolution)
|
||||
case _:
|
||||
raise ValueError("Invalid bar type. Supported types are 'time', 'volume' and 'dollar'.")
|
||||
# Convert the resulting array back to a DataFrame
|
||||
columns = ['time', 'open', 'high', 'low', 'close', 'volume', 'trades']
|
||||
if type == BarType.DOLLAR:
|
||||
columns.append('amount')
|
||||
columns.append('updated')
|
||||
if type == BarType.TIME:
|
||||
columns.append('vwap')
|
||||
columns.append('buyvolume')
|
||||
columns.append('sellvolume')
|
||||
if type == BarType.VOLUME:
|
||||
columns.append('buyvolume')
|
||||
columns.append('sellvolume')
|
||||
ohlcv_df = pd.DataFrame(ohlcv_bars, columns=columns)
|
||||
ohlcv_df['time'] = pd.to_datetime(ohlcv_df['time'], unit='s').dt.tz_localize('UTC').dt.tz_convert(zoneNY)
|
||||
#print(ohlcv_df['updated'])
|
||||
ohlcv_df['updated'] = pd.to_datetime(ohlcv_df['updated'], unit="s").dt.tz_localize('UTC').dt.tz_convert(zoneNY)
|
||||
# Round to microseconds to maintain six decimal places
|
||||
ohlcv_df['updated'] = ohlcv_df['updated'].dt.round('us')
|
||||
|
||||
:param symbol: The stock symbol to fetch trades for.
|
||||
:param start: The start time for the trade data.
|
||||
:param end: The end time for the trade data.
|
||||
:param max_retries: Maximum number of retries.
|
||||
:param backoff_factor: Factor to determine the next sleep time.
|
||||
:return: TradesResponse object.
|
||||
:raises: ConnectionError if all retries fail.
|
||||
ohlcv_df.set_index('time', inplace=True)
|
||||
#ohlcv_df.index = ohlcv_df.index.tz_localize('UTC').tz_convert(zoneNY)
|
||||
return ohlcv_df
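
# Usage sketch (added for illustration; not part of the original module):
# trades_df = fetch_trades_parallel("BAC", start, end)  # multiindex (symbol, timestamp)
# bars_1m = aggregate_trades("BAC", trades_df, resolution=60, type=BarType.TIME)        # 60 s time bars
# bars_10k = aggregate_trades("BAC", trades_df, resolution=10_000, type=BarType.VOLUME) # 10k-share volume bars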
|
||||
|
||||
# Function to ensure fractional seconds are present
|
||||
def ensure_fractional_seconds(timestamp):
|
||||
if '.' not in timestamp:
|
||||
# Inserting .000000 before the timezone indicator 'Z'
|
||||
return timestamp.replace('Z', '.000000Z')
|
||||
else:
|
||||
return timestamp
|
||||
|
||||
def convert_dict_to_multiindex_df(tradesResponse):
|
||||
""""
|
||||
Converts dictionary from cache or from remote (raw input) to multiindex dataframe.
|
||||
with microsecond precision (from nanoseconds in the raw data)
|
||||
"""""
|
||||
# Create a DataFrame for each key and add the key as part of the MultiIndex
|
||||
dfs = []
|
||||
for key, values in tradesResponse.items():
|
||||
df = pd.DataFrame(values)
|
||||
# Rename columns
|
||||
# Select and order columns explicitly
|
||||
#print(df)
|
||||
df = df[['t', 'x', 'p', 's', 'i', 'c','z']]
|
||||
df.rename(columns={'t': 'timestamp', 'c': 'conditions', 'p': 'price', 's': 'size', 'x': 'exchange', 'z':'tape', 'i':'id'}, inplace=True)
|
||||
df['symbol'] = key # Add ticker as a column
|
||||
|
||||
# Apply the function to ensure all timestamps have fractional seconds
|
||||
#consider whether to keep this, or apply it only on a specific to_datetime error
#possibly add a more efficient approach later, i.e. replacing NaT - https://chatgpt.com/c/d2be6f87-b38f-4050-a1c6-541d100b1474
|
||||
df['timestamp'] = df['timestamp'].apply(ensure_fractional_seconds)
|
||||
|
||||
df['timestamp'] = pd.to_datetime(df['timestamp'], errors='coerce') # Convert 't' from string to datetime before setting it as an index
|
||||
|
||||
#Adjust to microsecond precision
|
||||
df.loc[df['timestamp'].notna(), 'timestamp'] = df['timestamp'].dt.floor('us')
|
||||
|
||||
df.set_index(['symbol', 'timestamp'], inplace=True) # Set the multi-level index using both 'ticker' and 't'
|
||||
df = df.tz_convert(zoneNY, level='timestamp')
|
||||
dfs.append(df)
|
||||
|
||||
# Concatenate all DataFrames into a single DataFrame with MultiIndex
|
||||
final_df = pd.concat(dfs)
|
||||
|
||||
return final_df
|
||||
|
||||
def dict_to_df(tradesResponse, start, end, exclude_conditions = None, minsize = None):
|
||||
""""
|
||||
Transforms dict to Tradeset, then df and to zone aware
|
||||
Also filters to start and end if necessary (ex. 9:30 to 15:40 is required only)
|
||||
|
||||
NOTE: prepodkladame, ze tradesResponse je dict from Raw data (cached/remote)
|
||||
"""""
|
||||
|
||||
df = convert_dict_to_multiindex_df(tradesResponse)
|
||||
|
||||
#REQUIRED FILTERING
|
||||
#if the start is later or the end is earlier, trim accordingly
|
||||
if (start.time() > time(9, 30) or end.time() < time(16, 0)):
|
||||
print(f"filtrujeme {start.time()} {end.time()}")
|
||||
# Define the time range
|
||||
# start_time = pd.Timestamp(start.time(), tz=zoneNY).time()
|
||||
# end_time = pd.Timestamp(end.time(), tz=zoneNY).time()
|
||||
|
||||
# Create a mask to filter rows within the specified time range
|
||||
mask = (df.index.get_level_values('timestamp') >= start) & \
|
||||
(df.index.get_level_values('timestamp') <= end)
|
||||
|
||||
# Apply the mask to the DataFrame
|
||||
df = df[mask]
|
||||
|
||||
if exclude_conditions is not None:
|
||||
print(f"excluding conditions {exclude_conditions}")
|
||||
# Create a mask to exclude rows with any of the specified conditions
|
||||
mask = df['conditions'].apply(lambda x: any(cond in exclude_conditions for cond in x))
|
||||
|
||||
# Filter out the rows with specified conditions
|
||||
df = df[~mask]
|
||||
|
||||
if minsize is not None:
|
||||
print(f"minsize {minsize}")
|
||||
#filter by minimum trade size
|
||||
df = df[df['size'] >= minsize]
|
||||
return df
|
||||
|
||||
def fetch_daily_stock_trades(symbol, start, end, exclude_conditions=None, minsize=None, force_remote=False, max_retries=5, backoff_factor=1):
|
||||
#doc for this function
|
||||
"""
|
||||
client = StockHistoricalDataClient(ACCOUNT1_PAPER_API_KEY, ACCOUNT1_PAPER_SECRET_KEY)
|
||||
Attempts to fetch stock trades either from cache or remote. When remote, it uses a retry mechanism with exponential backoff.
It also stores the data to cache if it is not already there.
Using force_remote forces remote data always, thus refreshing the cache for these dates.
Attributes:
|
||||
:param symbol: The stock symbol to fetch trades for.
|
||||
:param start: The start time for the trade data.
|
||||
:param end: The end time for the trade data.
|
||||
:param exclude_conditions: list of string conditions to exclude from the data
:param minsize: minimum size of trade to be included in the data
:param force_remote: always use remote data and refresh the cache
|
||||
:param max_retries: Maximum number of retries.
|
||||
:param backoff_factor: Factor to determine the next sleep time.
|
||||
:return: TradesResponse object.
|
||||
:raises: ConnectionError if all retries fail.
|
||||
|
||||
We use tradecache only for main session requests = 9:30 to 16:00
In the future, store the whole day as BAC-20240203.cache.gz and filter either the main or the extended session from it.
But for now only the main session is stored, in BAC-<open timestamp>-<close timestamp>.cache.gz
|
||||
"""
|
||||
is_same_day = start.date() == end.date()
|
||||
# Determine if the requested times fall within the main session
|
||||
in_main_session = (time(9, 30) <= start.time() < time(16, 0)) and (time(9, 30) <= end.time() <= time(16, 0))
|
||||
file_path = ''
|
||||
|
||||
if in_main_session:
|
||||
filename_start = zoneNY.localize(datetime.combine(start.date(), time(9, 30)))
|
||||
filename_end = zoneNY.localize(datetime.combine(end.date(), time(16, 0)))
|
||||
daily_file = f"{symbol}-{int(filename_start.timestamp())}-{int(filename_end.timestamp())}.cache.gz"
|
||||
file_path = f"{DATA_DIR}/tradecache/{daily_file}"
|
||||
if not force_remote and os.path.exists(file_path):
|
||||
print(f"Searching {str(start.date())} cache: " + daily_file)
|
||||
with gzip.open(file_path, 'rb') as fp:
|
||||
tradesResponse = pickle.load(fp)
|
||||
print("FOUND in CACHE", daily_file)
|
||||
return dict_to_df(tradesResponse, start, end, exclude_conditions, minsize)
|
||||
|
||||
print("NOT FOUND. Fetching from remote")
|
||||
client = StockHistoricalDataClient(ACCOUNT1_PAPER_API_KEY, ACCOUNT1_PAPER_SECRET_KEY, raw_data=True)
|
||||
stockTradeRequest = StockTradesRequest(symbol_or_symbols=symbol, start=start, end=end)
|
||||
last_exception = None
|
||||
|
||||
for attempt in range(max_retries):
|
||||
try:
|
||||
tradesResponse = client.get_stock_trades(stockTradeRequest)
|
||||
print("Remote Fetch DAY DATA Complete", start, end)
|
||||
return tradesResponse
|
||||
is_empty = not tradesResponse[symbol]
|
||||
print(f"Remote fetched: {is_empty=}", start, end)
|
||||
if in_main_session and not is_empty:
|
||||
current_time = datetime.now().astimezone(zoneNY)
|
||||
if not (start < current_time < end):
|
||||
with gzip.open(file_path, 'wb') as fp:
|
||||
pickle.dump(tradesResponse, fp)
|
||||
print("Saving to Trade CACHE", file_path)
|
||||
|
||||
else: # Don't save the cache if the market is still open
|
||||
print("Not saving trade cache, market still open today")
|
||||
return pd.DataFrame() if is_empty else dict_to_df(tradesResponse, start, end, exclude_conditions, minsize)
|
||||
except Exception as e:
|
||||
print(f"Attempt {attempt + 1} failed: {e}")
|
||||
last_exception = e
|
||||
time.sleep(backoff_factor * (2 ** attempt))
|
||||
time_module.sleep(backoff_factor * (2 ** attempt) + random.uniform(0, 1)) # Adding random jitter
|
||||
|
||||
print("All attempts to fetch data failed.")
|
||||
raise ConnectionError(f"Failed to fetch stock trades after {max_retries} retries. Last exception: {str(last_exception)} and {format_exc()}")
|
||||
|
||||
|
||||
@jit(nopython=True)
|
||||
def ohlcv_bars(ticks, start_time, end_time, resolution):
|
||||
def fetch_trades_parallel(symbol, start_date, end_date, exclude_conditions = cfh.config_handler.get_val('AGG_EXCLUDED_TRADES'), minsize = 100, force_remote = False, max_workers=None):
|
||||
"""
|
||||
Generate OHLCV bars from tick data, skipping intervals without trading activity.
|
||||
|
||||
Parameters:
|
||||
- ticks: numpy array with columns [timestamp, price, size]
|
||||
- start_time: the start timestamp for bars (Unix timestamp)
|
||||
- end_time: the end timestamp for bars (Unix timestamp)
|
||||
- resolution: time resolution in seconds
|
||||
|
||||
Returns:
|
||||
- OHLCV bars as a numpy array
|
||||
"""
|
||||
num_bars = (end_time - start_time) // resolution + 1
|
||||
bar_list = []
|
||||
Fetches trades for each day between start_date and end_date during market hours (9:30-16:00) in parallel and concatenates them into a single DataFrame.
|
||||
|
||||
for i in range(num_bars):
|
||||
bar_start_time = start_time + i * resolution
|
||||
bar_end_time = bar_start_time + resolution
|
||||
bar_ticks = ticks[(ticks[:, 0] >= bar_start_time) & (ticks[:, 0] < bar_end_time)]
|
||||
:param symbol: Stock symbol.
|
||||
:param start_date: Start date as datetime.
|
||||
:param end_date: End date as datetime.
|
||||
:return: DataFrame containing all trades from start_date to end_date.
|
||||
"""
|
||||
futures = []
|
||||
results = []
|
||||
|
||||
market_open_days = fetch_calendar_data(start_date, end_date)
|
||||
day_count = len(market_open_days)
|
||||
print("Contains", day_count, " market days")
|
||||
max_workers = min(10, max(2, day_count // 2)) if max_workers is None else max_workers # Heuristic: half the days to process, but at least 2 and no more than 10
|
||||
|
||||
with ThreadPoolExecutor(max_workers=max_workers) as executor:
|
||||
#for single_date in (start_date + timedelta(days=i) for i in range((end_date - start_date).days + 1)):
|
||||
for market_day in tqdm(market_open_days, desc="Processing market days"):
|
||||
#start = datetime.combine(single_date, time(9, 30)) # Market opens at 9:30 AM
|
||||
#end = datetime.combine(single_date, time(16, 0)) # Market closes at 4:00 PM
|
||||
|
||||
interval_from = zoneNY.localize(market_day.open)
|
||||
interval_to = zoneNY.localize(market_day.close)
|
||||
|
||||
#optionally trim if a later start or an earlier end is requested
|
||||
start = start_date if interval_from < start_date else interval_from
|
||||
#start = max(start_date, interval_from)
|
||||
end = end_date if interval_to > end_date else interval_to
|
||||
#end = min(end_date, interval_to)
|
||||
|
||||
future = executor.submit(fetch_daily_stock_trades, symbol, start, end, exclude_conditions, minsize, force_remote)
|
||||
futures.append(future)
|
||||
|
||||
if bar_ticks.shape[0] == 0:
|
||||
continue # Skip this bar as there are no ticks
|
||||
for future in tqdm(futures, desc="Fetching data"):
|
||||
try:
|
||||
result = future.result()
|
||||
results.append(result)
|
||||
except Exception as e:
|
||||
print(f"Error fetching data for a day: {e}")
|
||||
|
||||
# Calculate OHLCV values
|
||||
open_price = bar_ticks[0, 1] # open
|
||||
high_price = np.max(bar_ticks[:, 1]) # high
|
||||
low_price = np.min(bar_ticks[:, 1]) # low
|
||||
close_price = bar_ticks[-1, 1] # close
|
||||
volume = np.sum(bar_ticks[:, 2]) # volume
|
||||
bar_time = bar_start_time # timestamp for the bar
|
||||
# Batch concatenation to improve speed
|
||||
batch_size = 10
|
||||
batches = [results[i:i + batch_size] for i in range(0, len(results), batch_size)]
|
||||
final_df = pd.concat([pd.concat(batch, ignore_index=False) for batch in batches], ignore_index=False)
|
||||
|
||||
bar_list.append([open_price, high_price, low_price, close_price, volume, bar_time])
|
||||
return final_df
|
||||
|
||||
# Convert list to numpy array
|
||||
if bar_list:
|
||||
ohlcv = np.array(bar_list)
|
||||
else:
|
||||
ohlcv = np.empty((0, 6)) # return an empty array if no bars were created
|
||||
#original version
|
||||
#return pd.concat(results, ignore_index=False)
|
||||
|
||||
return ohlcv
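
# Usage sketch (added; dates are assumptions): fetch a week of market days in
# parallel, letting the worker heuristic above pick the pool size.
# df = fetch_trades_parallel("BAC",
#                            zoneNY.localize(datetime(2024, 4, 22, 9, 30)),
#                            zoneNY.localize(datetime(2024, 4, 26, 16, 0)),
#                            minsize=100)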
|
||||
@jit(nopython=True)
|
||||
def generate_dollar_bars_nb(ticks, amount_per_bar):
|
||||
""""
|
||||
Generates Dollar based bars from ticks.
|
||||
|
||||
There is also simple prevention of aggregation from different days
|
||||
as described here https://chatgpt.com/c/17804fc1-a7bc-495d-8686-b8392f3640a2
|
||||
Downside: split days by UTC (which is ok for main session, but when extended hours it should be reworked by preprocessing new column identifying session)
|
||||
|
||||
|
||||
When trade is split into multiple bars it is counted as trade in each of the bars.
|
||||
Other option: trade count can be proportionally distributed by weight (0.2 to 1st bar, 0.8 to 2nd bar) - but this is not implemented yet
|
||||
https://chatgpt.com/c/ff4802d9-22a2-4b72-8ab7-97a91e7a515f
|
||||
"""""
|
||||
ohlcv_bars = []
|
||||
remaining_amount = amount_per_bar
|
||||
|
||||
# Initialize bar values based on the first tick to avoid uninitialized values
|
||||
open_price = ticks[0, 1]
|
||||
high_price = ticks[0, 1]
|
||||
low_price = ticks[0, 1]
|
||||
close_price = ticks[0, 1]
|
||||
volume = 0
|
||||
trades_count = 0
|
||||
current_day = np.floor(ticks[0, 0] / 86400) # Calculate the initial day from the first tick timestamp
|
||||
bar_time = ticks[0, 0] # Initialize bar time with the time of the first tick
|
||||
|
||||
for tick in ticks:
|
||||
tick_time = tick[0]
|
||||
price = tick[1]
|
||||
tick_volume = tick[2]
|
||||
tick_amount = price * tick_volume
|
||||
tick_day = np.floor(tick_time / 86400) # Calculate the day of the current tick
|
||||
|
||||
# Check if the new tick is from a different day, then close the current bar
|
||||
if tick_day != current_day:
|
||||
if trades_count > 0:
|
||||
ohlcv_bars.append([bar_time, open_price, high_price, low_price, close_price, volume, trades_count, amount_per_bar, tick_time])
|
||||
# Reset for the new day using the current tick data
|
||||
open_price = price
|
||||
high_price = price
|
||||
low_price = price
|
||||
close_price = price
|
||||
volume = 0
|
||||
trades_count = 0
|
||||
remaining_amount = amount_per_bar
|
||||
current_day = tick_day
|
||||
bar_time = tick_time
|
||||
|
||||
# Start new bar if needed because of the dollar value
|
||||
while tick_amount > 0:
|
||||
if tick_amount < remaining_amount:
|
||||
# Add the entire tick to the current bar
|
||||
high_price = max(high_price, price)
|
||||
low_price = min(low_price, price)
|
||||
close_price = price
|
||||
volume += tick_volume
|
||||
remaining_amount -= tick_amount
|
||||
trades_count += 1
|
||||
tick_amount = 0
|
||||
else:
|
||||
# Calculate the amount of volume that fits within the remaining dollar amount
|
||||
volume_to_add = remaining_amount / price
|
||||
volume += volume_to_add # Update the volume here before appending and resetting
|
||||
|
||||
# Append the partially filled bar to the list
|
||||
ohlcv_bars.append([bar_time, open_price, high_price, low_price, close_price, volume, trades_count + 1, amount_per_bar, tick_time])
|
||||
|
||||
# Fill the current bar and continue with a new bar
|
||||
tick_volume -= volume_to_add
|
||||
tick_amount -= remaining_amount
|
||||
|
||||
# Reset bar values for the new bar using the current tick data
|
||||
open_price = price
|
||||
high_price = price
|
||||
low_price = price
|
||||
close_price = price
|
||||
volume = 0 # Reset volume for the new bar
|
||||
trades_count = 0
|
||||
remaining_amount = amount_per_bar
|
||||
|
||||
# Increment bar time if splitting a trade
|
||||
if tick_volume > 0: #pokud v tradu je jeste zbytek nastavujeme cas o nanosekundu vetsi
|
||||
bar_time = tick_time + 1e-6
|
||||
else:
|
||||
bar_time = tick_time #jinak nastavujeme cas ticku
|
||||
#bar_time = tick_time
|
||||
|
||||
# Add the last bar if it contains any trades
|
||||
if trades_count > 0:
|
||||
ohlcv_bars.append([bar_time, open_price, high_price, low_price, close_price, volume, trades_count, amount_per_bar, tick_time])
|
||||
|
||||
return np.array(ohlcv_bars)
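
# Worked example (added): with amount_per_bar = 1_000, a single tick
# [t, price=40.0, size=50] carries $2,000 notional, so the loop above emits
# two bars of $1,000 / 25 shares each.
# ticks = np.array([[1_700_000_000.0, 40.0, 50.0]])
# generate_dollar_bars_nb(ticks, 1_000.0)  # -> 2 bars, volume 25.0 each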
|
||||
|
||||
|
||||
@jit(nopython=True)
|
||||
def generate_volume_bars_nb(ticks, volume_per_bar):
|
||||
""""
|
||||
Generates Volume based bars from ticks.
|
||||
|
||||
NOTE: UTC day split here (doesnt aggregate trades from different days)
|
||||
but realized from UTC (ok for main session) - but needs rework for extension by preprocessing ticks_df and introduction sesssion column
|
||||
|
||||
When trade is split into multiple bars it is counted as trade in each of the bars.
|
||||
Other option: trade count can be proportionally distributed by weight (0.2 to 1st bar, 0.8 to 2nd bar) - but this is not implemented yet
|
||||
https://chatgpt.com/c/ff4802d9-22a2-4b72-8ab7-97a91e7a515f
|
||||
"""""
|
||||
ohlcv_bars = []
|
||||
remaining_volume = volume_per_bar
|
||||
|
||||
# Initialize bar values based on the first tick to avoid uninitialized values
|
||||
open_price = ticks[0, 1]
|
||||
high_price = ticks[0, 1]
|
||||
low_price = ticks[0, 1]
|
||||
close_price = ticks[0, 1]
|
||||
volume = 0
|
||||
trades_count = 0
|
||||
current_day = np.floor(ticks[0, 0] / 86400) # Calculate the initial day from the first tick timestamp
|
||||
bar_time = ticks[0, 0] # Initialize bar time with the time of the first tick
|
||||
buy_volume = 0 # Volume of buy trades
|
||||
sell_volume = 0 # Volume of sell trades
|
||||
prev_price = ticks[0, 1] # Initialize previous price for the first tick
|
||||
|
||||
for tick in ticks:
|
||||
tick_time = tick[0]
|
||||
price = tick[1]
|
||||
tick_volume = tick[2]
|
||||
tick_day = np.floor(tick_time / 86400) # Calculate the day of the current tick
|
||||
|
||||
# Check if the new tick is from a different day, then close the current bar
|
||||
if tick_day != current_day:
|
||||
if trades_count > 0:
|
||||
ohlcv_bars.append([bar_time, open_price, high_price, low_price, close_price, volume, trades_count, tick_time, buy_volume, sell_volume])
|
||||
# Reset for the new day using the current tick data
|
||||
open_price = price
|
||||
high_price = price
|
||||
low_price = price
|
||||
close_price = price
|
||||
volume = 0
|
||||
trades_count = 0
|
||||
remaining_volume = volume_per_bar
|
||||
current_day = tick_day
|
||||
bar_time = tick_time # Update bar time to the current tick time
|
||||
buy_volume = 0
|
||||
sell_volume = 0
|
||||
# Reset previous tick price (calculating imbalance for each day from the start)
|
||||
prev_price = price
|
||||
|
||||
# Start new bar if needed because of the volume
|
||||
while tick_volume > 0:
|
||||
if tick_volume < remaining_volume:
|
||||
# Add the entire tick to the current bar
|
||||
high_price = max(high_price, price)
|
||||
low_price = min(low_price, price)
|
||||
close_price = price
|
||||
volume += tick_volume
|
||||
remaining_volume -= tick_volume
|
||||
trades_count += 1
|
||||
|
||||
# Update buy and sell volumes
|
||||
if price > prev_price:
|
||||
buy_volume += tick_volume
|
||||
elif price < prev_price:
|
||||
sell_volume += tick_volume
|
||||
|
||||
tick_volume = 0
|
||||
else:
|
||||
# Fill the current bar and continue with a new bar
|
||||
volume_to_add = remaining_volume
|
||||
volume += volume_to_add
|
||||
tick_volume -= volume_to_add
|
||||
trades_count += 1
|
||||
|
||||
# Update buy and sell volumes
|
||||
if price > prev_price:
|
||||
buy_volume += volume_to_add
|
||||
elif price < prev_price:
|
||||
sell_volume += volume_to_add
|
||||
|
||||
# Append the completed bar to the list
|
||||
ohlcv_bars.append([bar_time, open_price, high_price, low_price, close_price, volume, trades_count, tick_time, buy_volume, sell_volume])
|
||||
|
||||
# Reset bar values for the new bar using the current tick data
|
||||
open_price = price
|
||||
high_price = price
|
||||
low_price = price
|
||||
close_price = price
|
||||
volume = 0
|
||||
trades_count = 0
|
||||
remaining_volume = volume_per_bar
|
||||
buy_volume = 0
|
||||
sell_volume = 0
|
||||
|
||||
# Increment bar time if splitting a trade
|
||||
if tick_volume > 0: # If there's remaining volume in the trade, set bar time slightly later
|
||||
bar_time = tick_time + 1e-6
|
||||
else:
|
||||
bar_time = tick_time # Otherwise, set bar time to the tick time
|
||||
|
||||
prev_price = price
|
||||
|
||||
# Add the last bar if it contains any trades
|
||||
if trades_count > 0:
|
||||
ohlcv_bars.append([bar_time, open_price, high_price, low_price, close_price, volume, trades_count, tick_time, buy_volume, sell_volume])
|
||||
|
||||
return np.array(ohlcv_bars)
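
The proportional alternative mentioned in the docstring could look roughly like this - a minimal sketch, not part of the codebase, that only computes the per-bar weights for one split trade (the names and the 500/100/400 figures are illustrative):

# Sketch only: proportional trade-count attribution for a split trade.
# A trade that puts `fill` shares into a bar contributes fill / trade_size
# to that bar's trade count, so the weights across all touched bars sum to 1.
def split_trade_weights(trade_size, remaining_volume, volume_per_bar):
    weights = []
    left = trade_size
    while left > 0:
        fill = min(left, remaining_volume)
        weights.append(fill / trade_size)  # fraction of the trade in this bar
        left -= fill
        remaining_volume = volume_per_bar  # next bar starts empty
    return weights

# 500-share trade, 100 shares of room left in the current 400-share bar
print(split_trade_weights(500, 100, 400))  # [0.2, 0.8]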
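
For reference, a minimal way to drive the generator from a trades DataFrame - a sketch with made-up sample data, assuming the [time, price, size] tick layout used throughout this module; the result's column order follows the append order inside the function:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "time":  [1_700_000_000.0, 1_700_000_001.5, 1_700_000_002.2],  # Unix seconds
    "price": [30.10, 30.12, 30.11],
    "size":  [200.0, 900.0, 400.0],
})
ticks = df[["time", "price", "size"]].to_numpy(dtype=np.float64)
bars = generate_volume_bars_nb(ticks, 500)  # 500 shares per bar
bars_df = pd.DataFrame(bars, columns=["time", "open", "high", "low", "close",
                                      "volume", "trades", "updated",
                                      "buy_volume", "sell_volume"])
print(bars_df)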

@jit(nopython=True)
def generate_time_bars_nb(ticks, resolution):
    # Initialize the start and end time
    start_time = np.floor(ticks[0, 0] / resolution) * resolution
    end_time = np.floor(ticks[-1, 0] / resolution) * resolution

    # # Calculate number of bars
    # num_bars = int((end_time - start_time) // resolution + 1)

    # Using a list to append data only when trades exist
    ohlcv_bars = []

    # Variables to track the current bar
    current_bar_index = -1
    open_price = 0
    high_price = -np.inf
    low_price = np.inf
    close_price = 0
    volume = 0
    trades_count = 0
    vwap_cum_volume_price = 0  # Cumulative volume * price
    cum_volume = 0  # Cumulative volume for VWAP
    buy_volume = 0  # Volume of buy trades
    sell_volume = 0  # Volume of sell trades
    prev_price = ticks[0, 1]  # Initialize previous price for the first tick
    prev_day = np.floor(ticks[0, 0] / 86400)  # Calculate the initial day from the first tick timestamp

    for tick in ticks:
        curr_time = tick[0]  # updated time
        tick_time = np.floor(tick[0] / resolution) * resolution
        price = tick[1]
        tick_volume = tick[2]
        tick_day = np.floor(tick_time / 86400)  # Calculate the day of the current tick

        # If the new tick is from a new day, reset the previous tick price (imbalance calculation starts over)
        if tick_day != prev_day:
            prev_price = price
            prev_day = tick_day

        # Check if the tick belongs to a new bar
        if tick_time != start_time + current_bar_index * resolution:
            if current_bar_index >= 0 and trades_count > 0:  # Save the previous bar if trades happened
                vwap = vwap_cum_volume_price / cum_volume if cum_volume > 0 else 0
                ohlcv_bars.append([start_time + current_bar_index * resolution, open_price, high_price, low_price, close_price, volume, trades_count, curr_time, vwap, buy_volume, sell_volume])

            # Reset bar values
            current_bar_index = int((tick_time - start_time) / resolution)
            open_price = price
            high_price = price
            low_price = price
            volume = 0
            trades_count = 0
            vwap_cum_volume_price = 0
            cum_volume = 0
            buy_volume = 0
            sell_volume = 0

        # Update the OHLCV values for the current bar
        high_price = max(high_price, price)
        low_price = min(low_price, price)
        close_price = price
        volume += tick_volume
        trades_count += 1
        vwap_cum_volume_price += price * tick_volume
        cum_volume += tick_volume

        # Update buy and sell volumes
        if price > prev_price:
            buy_volume += tick_volume
        elif price < prev_price:
            sell_volume += tick_volume

        prev_price = price

    # Save the last processed bar
    if trades_count > 0:
        vwap = vwap_cum_volume_price / cum_volume if cum_volume > 0 else 0
        ohlcv_bars.append([start_time + current_bar_index * resolution, open_price, high_price, low_price, close_price, volume, trades_count, curr_time, vwap, buy_volume, sell_volume])

    return np.array(ohlcv_bars)

# Example usage
if __name__ == '__main__':
    # symbol = ["BAC"]
    # # datetime in zoneNY
    # day_start = datetime(2024, 4, 22, 9, 30, 0)
    # day_stop = datetime(2024, 4, 22, 16, 00, 0)

    # day_start = zoneNY.localize(day_start)
    # day_stop = zoneNY.localize(day_stop)

    # tradesResponse = fetch_stock_trades(symbol, day_start, day_stop)

    # df = tradesResponse.df
    # df.to_parquet('trades_bac.parquet', engine='pyarrow')

    df = pd.read_parquet('trades_bac.parquet', engine='pyarrow')
    print(df)

    # df = pd.read_csv('tick_data.csv')  # DF with tick data
    # Assuming 'df' is your DataFrame with columns 'time', 'price', 'size', 'condition'
    exclude_conditions = ['ConditionA', 'ConditionB']  # Conditions to exclude
    df_filtered = df[~df['condition'].isin(exclude_conditions)]
    # Define your start and end times based on your trading session; ensure these are Unix timestamps
    start_time = pd.to_datetime('2023-01-01 09:30:00').timestamp()
    end_time = pd.to_datetime('2023-01-01 16:00:00').timestamp()
    ticks = df_filtered[['time', 'price', 'size']].to_numpy()
    ticks[:, 0] = pd.to_datetime(ticks[:, 0]).astype('int64') // 1_000_000_000  # Convert to Unix timestamp
    resolution_seconds = 1  # 1 second resolution
    ohlcv_data = generate_time_bars_nb(ticks, resolution_seconds)

    # Converting the result back to DataFrame for better usability
    ohlcv_df = pd.DataFrame(ohlcv_data, columns=['Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Trades', 'Updated', 'Vwap', 'BuyVolume', 'SellVolume'])
    ohlcv_df['Time'] = pd.to_datetime(ohlcv_df['Time'], unit='s')  # Convert timestamps back to datetime
    pass
    # example in agg_vect.ipynb

@@ -11,7 +11,7 @@ import uvicorn
 from uuid import UUID
 from v2realbot.utils.ilog import get_log_window
 from v2realbot.common.model import RunManagerRecord, StrategyInstance, RunnerView, RunRequest, Trade, RunArchive, RunArchiveView, RunArchiveViewPagination, RunArchiveDetail, Bar, RunArchiveChange, TestList, ConfigItem, InstantIndicator, DataTablesRequest, AnalyzerInputs
-from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Depends, HTTPException, status, WebSocketException, Cookie, Query
+from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Depends, HTTPException, status, WebSocketException, Cookie, Query, Request
 from fastapi.responses import FileResponse, StreamingResponse, JSONResponse
 from fastapi.staticfiles import StaticFiles
 from fastapi.security import HTTPBasic, HTTPBasicCredentials

@@ -35,7 +35,7 @@ from traceback import format_exc
 #from v2realbot.reporting.optimizecutoffs import find_optimal_cutoff
 import v2realbot.reporting.analyzer as ci
 import shutil
-from starlette.responses import JSONResponse
+from starlette.responses import JSONResponse, HTMLResponse, FileResponse, RedirectResponse
 import mlroom
 import mlroom.utils.mlutils as ml
 from typing import List

@@ -74,14 +74,52 @@ def api_key_auth(api_key: str = Depends(X_API_KEY)):
             status_code=status.HTTP_401_UNAUTHORIZED,
             detail="Forbidden"
         )
 
+def authenticate_user(credentials: HTTPBasicCredentials = Depends(HTTPBasic())):
+    correct_username = "david"
+    correct_password = "david"
+
+    if credentials.username == correct_username and credentials.password == correct_password:
+        return True
+    else:
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail="Incorrect username or password",
+            headers={"WWW-Authenticate": "Basic"},
+        )
+
 app = FastAPI()
 root = os.path.dirname(os.path.abspath(__file__))
-app.mount("/static", StaticFiles(html=True, directory=os.path.join(root, 'static')), name="static")
+#app.mount("/static", StaticFiles(html=True, directory=os.path.join(root, 'static')), name="static")
 app.mount("/media", StaticFiles(directory=str(MEDIA_DIRECTORY)), name="media")
 #app.mount("/", StaticFiles(html=True, directory=os.path.join(root, 'static')), name="www")
 
+security = HTTPBasic()
+@app.get("/static/{path:path}")
+async def static_files(request: Request, path: str, authenticated: bool = Depends(authenticate_user)):
+    root = os.path.dirname(os.path.abspath(__file__))
+    static_dir = os.path.join(root, 'static')
+
+    if not path or path == "/":
+        file_path = os.path.join(static_dir, 'index.html')
+    else:
+        file_path = os.path.join(static_dir, path)
+
+    # Check if path is a directory
+    if os.path.isdir(file_path):
+        # If it's a directory, try to serve index.html within that directory
+        index_path = os.path.join(file_path, 'index.html')
+        if os.path.exists(index_path):
+            return FileResponse(index_path)
+        else:
+            # Optionally, you can return a directory listing or a custom 404 page here
+            return HTMLResponse("Directory listing not enabled.", status_code=403)
+
+    if not os.path.exists(file_path):
+        raise HTTPException(status_code=404, detail="File not found")
+
+    return FileResponse(file_path)
+
 def get_current_username(
     credentials: Annotated[HTTPBasicCredentials, Depends(security)]
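
A quick way to verify the protection on the re-implemented /static route - a sketch only; the host and port assume a local uvicorn on 8000, and the credentials are the ones hardcoded in authenticate_user above:

import httpx

r = httpx.get("http://localhost:8000/static/index.html")
print(r.status_code)  # 401 - no credentials supplied

r = httpx.get("http://localhost:8000/static/index.html", auth=("david", "david"))
print(r.status_code)  # 200 - served by static_files via FileResponse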

@@ -103,9 +141,9 @@ async def get_api_key(
     return session or api_key
 
 #TODO rework from async?
-@app.get("/static")
-async def get(username: Annotated[str, Depends(get_current_username)]):
-    return FileResponse("index.html")
+# @app.get("/static")
+# async def get(username: Annotated[str, Depends(get_current_username)]):
+#     return FileResponse("index.html")
 
 @app.websocket("/runners/{runner_id}/ws")
 async def websocket_endpoint(

@@ -1150,7 +1150,7 @@
 <script src="/static/js/config.js?v=1.04"></script>
 <!-- temporary local copy starts here -->
 <!-- <script type="text/javascript" src="https://unpkg.com/lightweight-charts/dist/lightweight-charts.standalone.production.js"></script> -->
-<script type="text/javascript" src="/static/js/libs/lightweightcharts/lightweight-charts.standalone.production410.js"></script>
+<script type="text/javascript" src="/static/js/libs/lightweightcharts/lightweight-charts.standalone.production413.js"></script>
 <script src="/static/js/dynamicbuttons.js?v=1.05"></script>
File diff suppressed because one or more lines are too long

@@ -371,9 +371,10 @@ function initialize_chart() {
     }
 
     chart = LightweightCharts.createChart(document.getElementById('chart'), chartOptions);
-    chart.applyOptions({ timeScale: { visible: true, timeVisible: true, secondsVisible: true }, crosshair: {
+    chart.applyOptions({ timeScale: { visible: true, timeVisible: true, secondsVisible: true, minBarSpacing: 0.003}, crosshair: {
         mode: LightweightCharts.CrosshairMode.Normal, labelVisible: true
     }})
     console.log("chart initialized")
 }
 
 //maybe add the 'last value visible' attributes

 0  v2realbot/strategyblocks/activetrade/__init__.py  (new file)
 0  v2realbot/strategyblocks/indicators/__init__.py  (new file)
 0  v2realbot/strategyblocks/inits/__init__.py  (new file)
 0  v2realbot/strategyblocks/newtrade/__init__.py  (new file)
 0  v2realbot/tools/__init__.py  (new file)

@@ -9,7 +9,7 @@ from typing import List
 from enum import Enum
 import numpy as np
 import v2realbot.controller.services as cs
-from rich import print
+from rich import print as richprint
 from v2realbot.common.model import AnalyzerInputs
 from v2realbot.common.PrescribedTradeModel import TradeDirection, TradeStatus, Trade, TradeStoplossType
 from v2realbot.utils.utils import isrising, isfalling, zoneNY, price2dec, safe_get  #, print

@@ -94,7 +94,11 @@ def convert_to_dataframe(ohlcv):
 
     return df
 
-def load_batch(runner_ids: List = None, batch_id: str = None, space_resolution_evenly = False, main_session_only = True, merge_ind2bars = True, bars_columns = ['Open', 'High', 'Low', 'Close', 'Volume', 'Vwap'], indicators_columns = []) -> Tuple[int, dict]:
+def print(v, *args, **kwargs):
+    if v:
+        richprint(*args, **kwargs)
+
+def load_batch(runner_ids: List = None, batch_id: str = None, space_resolution_evenly = False, main_session_only = True, merge_ind2bars = True, bars_columns = ['Open', 'High', 'Low', 'Close', 'Volume', 'Vwap'], indicators_columns = [], verbose = False) -> Tuple[int, dict]:
     """Load batches (all runners from single batch) into pandas dataframes
 
     Args:

@@ -136,7 +140,7 @@ def load_batch(runner_ids: List = None, batch_id: str = None, space_resolution_evenly
 
     if resolution is None:
         resolution = sada["bars"]["resolution"][0]
-    print(f"Resolution : {resolution}")
+    print(verbose, f"Resolution : {resolution}")
 
     # add daily bars limited to required columns; we keep 'updated' as it's the mapping column to indicators
     bars = convert_to_dataframe(sada["bars"])[bars_columns + ["updated"]]

@@ -169,11 +173,11 @@ def load_batch(runner_ids: List = None, batch_id: str = None, space_resolution_evenly
     num_duplicates = concat_df.index.duplicated().sum()
 
     if num_duplicates > 0:
-        print(f"NOTE: DUPLICATES {num_duplicates}/{len(concat_df)} in {key}. REMOVING.")
+        print(verbose, f"NOTE: DUPLICATES {num_duplicates}/{len(concat_df)} in {key}. REMOVING.")
         concat_df = concat_df[~concat_df.index.duplicated()]
 
         num_duplicates = concat_df.index.duplicated().sum()
-        print(f"Now there are {num_duplicates}/{len(concat_df)}")
+        print(verbose, f"Now there are {num_duplicates}/{len(concat_df)}")
 
     if space_resolution_evenly and key != "cbar_indicators":
         # Apply rounding to the datetime index according to resolution (in seconds)
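
With the shadowed print gated on its first argument, callers opt into diagnostics through the new verbose flag - a sketch only; the batch_id value is a placeholder:

status, data = load_batch(batch_id="my-batch-id", verbose=True)   # progress messages printed
status, data = load_batch(batch_id="my-batch-id", verbose=False)  # silent (the default)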

@@ -5,6 +5,7 @@ from alpaca.data.enums import DataFeed
 import v2realbot.utils.config_defaults as config_defaults
 from v2realbot.enums.enums import FillCondition
 from rich import print
+# from v2realbot.utils.utils import print
 
 def aggregate_configurations(module):
     return {key: getattr(module, key) for key in dir(module) if key.isupper()}

@@ -48,8 +49,8 @@ class ConfigHandler:
                 self.active_config = self.default_config.copy()
                 self.active_config.update(override_configuration)
                 self.active_profile = profile_name
-                print(f"Profile {profile_name} loaded successfully.")
-                print("Current values:", self.active_config)
+                #print(f"Profile {profile_name} loaded successfully.")
+                #print("Current values:", self.active_config)
             else:
                 print(f"Profile {profile_name} does not exist in config item: {config_directive}")
         except Exception as e:

@@ -93,7 +94,9 @@ class ConfigHandler:
                 return FillCondition(value)
             case "BT_FILL_CONDITION_SELL_LIMIT":
                 return FillCondition(value)
-            # Add cases for other enumeration conversions as needed
+            case "AGG_EXCLUDED_TRADES":
+                return sorted(value)  # Convert to a sorted list
+            # Add cases for other enumeration conversions or transformations as needed
             case _:
                 return value

@@ -102,8 +105,8 @@ class ConfigHandler:
 
 # Global configuration - it is imported by modules that need it. In the future this can be changed to Dependency Injection (each service will get the config instance as an input parameter)
 config_handler = ConfigHandler()
-print(f"{config_handler.active_profile=}")
-print("config handler initialized")
+#print(f"{config_handler.active_profile=}")
+#print("config handler initialized")
 
 #this is how to get a value:
 #config_handler.get_val('BT_FILL_PRICE_MARKET_ORDER_PREMIUM')