Initial commit after copying files from flawed repository

David Brazda
2024-08-30 20:49:53 +02:00
commit c11ed9d474
47 changed files with 40520 additions and 0 deletions


@@ -0,0 +1,700 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "be764faf-65cc-408a-8ebd-96b8b4f14b60",
"metadata": {},
"source": [
"# Basic RSI strategy"
]
},
{
"cell_type": "markdown",
"id": "d15aa106-cb66-4fc8-b07d-347c078c634a",
"metadata": {
"tags": []
},
"source": [
"## Single backtest"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dee14aee-a14c-4e54-bf82-dbe9f64cbd62",
"metadata": {},
"outputs": [],
"source": [
"from vectorbtpro import *\n",
"# whats_imported()\n",
"\n",
"vbt.settings.set_theme('dark')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "32105542-882d-4403-a86c-fdcd8002e258",
"metadata": {},
"outputs": [],
"source": [
"data = vbt.BinanceData.pull('BTCUSDT')\n",
"data"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "843dc221-4353-47b7-9f3a-68b6b8ba8752",
"metadata": {},
"outputs": [],
"source": [
"data.data['BTCUSDT'].vbt.ohlcv.plot().show_svg()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f14f65d8-a8b8-4cb0-a135-258b01b5ff83",
"metadata": {},
"outputs": [],
"source": [
"data.data['BTCUSDT'].info()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b3ef3cc2-c70f-4469-8067-fe5e1c6a06f3",
"metadata": {},
"outputs": [],
"source": [
"open_price = data.get('Open')\n",
"close_price = data.get('Close')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4de98bf9-7f0e-48d8-b532-a917500d1f18",
"metadata": {},
"outputs": [],
"source": [
"vbt.IF.list_indicators(\"RSI*\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "af3df97f-b81e-4186-bf8e-adb50b29c5c0",
"metadata": {},
"outputs": [],
"source": [
"vbt.indicator(\"talib:RSI\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3ce59b4c-7e47-4484-be62-0e26a2dc177e",
"metadata": {},
"outputs": [],
"source": [
"vbt.RSI"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "70589f4f-fa0f-448f-95f6-77c4833beede",
"metadata": {},
"outputs": [],
"source": [
"vbt.talib('RSI')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6ba90109-ca96-49c5-8ea9-2ad90755b43a",
"metadata": {},
"outputs": [],
"source": [
"vbt.ta('RSIIndicator')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cd56e11e-bf9e-4304-842f-6df7b2b0b738",
"metadata": {},
"outputs": [],
"source": [
"vbt.pandas_ta('RSI')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "37d1140d-5ef0-4d22-9100-2b29e0e9f2ed",
"metadata": {},
"outputs": [],
"source": [
"print(vbt.format_func(vbt.RSI.run))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bee38f74-6c29-4cc9-a37e-25fe44c08bc8",
"metadata": {},
"outputs": [],
"source": [
"rsi = vbt.RSI.run(open_price)\n",
"rsi"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "943a0d3c-c8f3-4359-9387-744e5bb3da44",
"metadata": {},
"outputs": [],
"source": [
"rsi.rsi"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "69636dd4-8592-48b8-b4cb-fabf7cbd7b15",
"metadata": {},
"outputs": [],
"source": [
"entries = rsi.rsi.vbt.crossed_below(30)\n",
"entries"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b073280b-69ea-43ba-8694-4c2b506be57e",
"metadata": {},
"outputs": [],
"source": [
"exits = rsi.rsi.vbt.crossed_above(70)\n",
"exits"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "93ea75b8-5b75-4767-8adf-b535c9588147",
"metadata": {},
"outputs": [],
"source": [
"entries = rsi.rsi_crossed_below(30)\n",
"exits = rsi.rsi_crossed_above(70)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c127d5f5-eee4-4d6a-ae26-7592f5d3ab65",
"metadata": {},
"outputs": [],
"source": [
"def plot_rsi(rsi, entries, exits):\n",
" fig = rsi.plot()\n",
" entries.vbt.signals.plot_as_entries(rsi.rsi, fig=fig)\n",
" exits.vbt.signals.plot_as_exits(rsi.rsi, fig=fig)\n",
" return fig"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6743e20c-f2e6-4b64-9934-022fcc10acb7",
"metadata": {},
"outputs": [],
"source": [
"plot_rsi(rsi, entries, exits).show_svg()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3e1ef2e8-e225-4d60-891a-6c3e9cc2821b",
"metadata": {},
"outputs": [],
"source": [
"clean_entries, clean_exits = entries.vbt.signals.clean(exits)\n",
"\n",
"plot_rsi(rsi, clean_entries, clean_exits).show_svg()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1d2d4028-034b-4211-9a24-b82944b264da",
"metadata": {},
"outputs": [],
"source": [
"clean_entries.vbt.signals.total()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aeb5bbf3-1217-4c2f-a53a-3d426ac069c0",
"metadata": {},
"outputs": [],
"source": [
"clean_exits.vbt.signals.total()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f64bd28c-6dc9-476d-9bb1-a2e137a39dfe",
"metadata": {},
"outputs": [],
"source": [
"ranges = clean_entries.vbt.signals.between_ranges(target=clean_exits)\n",
"ranges.duration.mean(wrap_kwargs=dict(to_timedelta=True))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8f55e1c9-9960-43ea-b148-41a6131caa7b",
"metadata": {},
"outputs": [],
"source": [
"pf = vbt.Portfolio.from_signals(\n",
" close=close_price, \n",
" entries=clean_entries, \n",
" exits=clean_exits,\n",
" size=100,\n",
" size_type='value',\n",
" init_cash='auto'\n",
")\n",
"pf"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "02c8de50-5101-4a23-9d58-9c06c1ae4460",
"metadata": {},
"outputs": [],
"source": [
"pf.stats()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a6377f72-4174-49a1-8a9c-ad8095bc4437",
"metadata": {},
"outputs": [],
"source": [
"pf.plot(settings=dict(bm_returns=False)).show_svg()"
]
},
{
"cell_type": "markdown",
"id": "c1f78a0a-7bc9-4b32-9f3c-92d0da44d87b",
"metadata": {},
"source": [
"## Multiple backtests"
]
},
{
"cell_type": "markdown",
"id": "909f506d-8edb-4fa8-8425-5e9c4b6e7b1b",
"metadata": {},
"source": [
"### Using for-loop"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a781ea63-d4df-41c5-820e-195afd3770c9",
"metadata": {},
"outputs": [],
"source": [
"def test_rsi(window=14, wtype=\"wilder\", lower_th=30, upper_th=70):\n",
" rsi = vbt.RSI.run(open_price, window=window, wtype=wtype)\n",
" entries = rsi.rsi_crossed_below(lower_th)\n",
" exits = rsi.rsi_crossed_above(upper_th)\n",
" pf = vbt.Portfolio.from_signals(\n",
" close=close_price, \n",
" entries=entries, \n",
" exits=exits,\n",
" size=100,\n",
" size_type='value',\n",
" init_cash='auto')\n",
" return pf.stats([\n",
" 'total_return', \n",
" 'total_trades', \n",
" 'win_rate', \n",
" 'expectancy'\n",
" ])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d60e7721-a0bb-4f2d-af92-d074ee523582",
"metadata": {},
"outputs": [],
"source": [
"test_rsi()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "14e9e01d-9bcf-4e2c-8959-f5c66e1768c7",
"metadata": {},
"outputs": [],
"source": [
"test_rsi(lower_th=20, upper_th=80)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a7864dbb-5a24-456a-bda2-468e737fa8ea",
"metadata": {},
"outputs": [],
"source": [
"from itertools import product\n",
"\n",
"lower_ths = range(20, 31)\n",
"upper_ths = range(70, 81)\n",
"th_combs = list(product(lower_ths, upper_ths))\n",
"len(th_combs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fbc7c48e-e5e3-4ec7-8f76-7169d58231de",
"metadata": {},
"outputs": [],
"source": [
"comb_stats = [\n",
" test_rsi(lower_th=lower_th, upper_th=upper_th)\n",
" for lower_th, upper_th in th_combs\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8fbae677-56f9-49bc-88f7-325ff8ee00c4",
"metadata": {},
"outputs": [],
"source": [
"comb_stats_df = pd.DataFrame(comb_stats)\n",
"print(comb_stats_df)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a99e9ded-2954-40c8-93c5-ebff4a174036",
"metadata": {},
"outputs": [],
"source": [
"comb_stats_df.index = pd.MultiIndex.from_tuples(\n",
" th_combs, \n",
" names=['lower_th', 'upper_th'])\n",
"print(comb_stats_df)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "69511f79-00cd-40e2-b64f-097137bba69e",
"metadata": {},
"outputs": [],
"source": [
"comb_stats_df['Expectancy'].vbt.heatmap().show_svg()"
]
},
{
"cell_type": "markdown",
"id": "037bfdd0-e3dc-4ac5-9c0e-91c3320fcafd",
"metadata": {},
"source": [
"### Using columns"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ac5bc6ba-c6e1-4ca8-98fd-19b141a4220a",
"metadata": {},
"outputs": [],
"source": [
"windows = list(range(8, 21))\n",
"wtypes = [\"simple\", \"exp\", \"wilder\"]\n",
"lower_ths = list(range(20, 31))\n",
"upper_ths = list(range(70, 81))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fb6145b0-6ee7-4ac2-a8c3-5bd7127032be",
"metadata": {},
"outputs": [],
"source": [
"rsi = vbt.RSI.run(\n",
" open_price, \n",
" window=windows, \n",
" wtype=wtypes, \n",
" param_product=True)\n",
"rsi.rsi.columns"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f374086a-bf84-425c-a857-0ce4ea8d6a28",
"metadata": {},
"outputs": [],
"source": [
"lower_ths_prod, upper_ths_prod = zip(*product(lower_ths, upper_ths))\n",
"len(lower_ths_prod)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "069d9ffe-cdda-45af-832d-62bfb47ede42",
"metadata": {},
"outputs": [],
"source": [
"len(upper_ths_prod)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bd96a283-b9bd-4824-9218-86efe06b8cc6",
"metadata": {},
"outputs": [],
"source": [
"lower_th_index = vbt.Param(lower_ths_prod, name='lower_th')\n",
"entries = rsi.rsi_crossed_below(lower_th_index)\n",
"entries.columns"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c8510247-f1e1-4af7-8b14-0fb6d6f5813b",
"metadata": {},
"outputs": [],
"source": [
"upper_th_index = vbt.Param(upper_ths_prod, name='upper_th')\n",
"exits = rsi.rsi_crossed_above(upper_th_index)\n",
"exits.columns"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "209be8ac-393d-4a43-b752-022f3cfd8698",
"metadata": {},
"outputs": [],
"source": [
"pf = vbt.Portfolio.from_signals(\n",
" close=close_price, \n",
" entries=entries, \n",
" exits=exits,\n",
" size=100,\n",
" size_type='value',\n",
" init_cash='auto'\n",
")\n",
"pf"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "15143c2b-b795-493c-9e8f-5aa0d877d391",
"metadata": {},
"outputs": [],
"source": [
"stats_df = pf.stats([\n",
" 'total_return', \n",
" 'total_trades', \n",
" 'win_rate', \n",
" 'expectancy'\n",
"], agg_func=None)\n",
"print(stats_df)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d65043d-2081-4179-bc94-d42d1465a898",
"metadata": {},
"outputs": [],
"source": [
"print(pf.getsize())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "862f2e98-dea6-4c7e-b5e9-21c16ca27de2",
"metadata": {},
"outputs": [],
"source": [
"np.prod(pf.wrapper.shape) * 8 / 1024 / 1024  # rough estimate in MB (8 bytes per float64 element)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "abea668f-5f95-44d3-a158-38440f70433d",
"metadata": {},
"outputs": [],
"source": [
"stats_df['Expectancy'].groupby('rsi_window').mean()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "df815592-9f63-496e-8d88-99b1462d1030",
"metadata": {},
"outputs": [],
"source": [
"print(stats_df.sort_values(by='Expectancy', ascending=False).head())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1cb39bea-298c-4202-a996-c84b157f1ab9",
"metadata": {},
"outputs": [],
"source": [
"pf[(22, 80, 20, \"wilder\")].plot_value().show_svg()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bced4fa8-f7eb-49e4-980b-f0f4acd20540",
"metadata": {},
"outputs": [],
"source": [
"data = vbt.BinanceData.pull(['BTCUSDT', 'ETHUSDT'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "55e86189-8244-4955-b736-f4f4f840b80a",
"metadata": {},
"outputs": [],
"source": [
"open_price = data.get('Open')\n",
"close_price = data.get('Close')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bd7aefae-3f83-48ac-8f20-7d0a4760cd4b",
"metadata": {},
"outputs": [],
"source": [
"rsi = vbt.RSI.run(\n",
" open_price, \n",
" window=windows, \n",
" wtype=wtypes, \n",
" param_product=True)\n",
"entries = rsi.rsi_crossed_below(lower_th_index)\n",
"exits = rsi.rsi_crossed_above(upper_th_index)\n",
"pf = vbt.Portfolio.from_signals(\n",
" close=close_price, \n",
" entries=entries, \n",
" exits=exits,\n",
" size=100,\n",
" size_type='value',\n",
" init_cash='auto'\n",
")\n",
"stats_df = pf.stats([\n",
" 'total_return', \n",
" 'total_trades', \n",
" 'win_rate', \n",
" 'expectancy'\n",
"], agg_func=None)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a7007b6a-bf08-40a6-a4be-091fec376290",
"metadata": {},
"outputs": [],
"source": [
"stats_df.index"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a67661e7-d8df-4efe-8479-93d194ca255d",
"metadata": {},
"outputs": [],
"source": [
"eth_mask = stats_df.index.get_level_values('symbol') == 'ETHUSDT'\n",
"btc_mask = stats_df.index.get_level_values('symbol') == 'BTCUSDT'\n",
"pd.DataFrame({\n",
" 'ETHUSDT': stats_df[eth_mask]['Expectancy'].values,\n",
" 'BTCUSDT': stats_df[btc_mask]['Expectancy'].values\n",
"}).vbt.histplot(xaxis=dict(title=\"Expectancy\")).show_svg()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5a64e903-1256-4e4b-9d22-667f2706c9da",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,372 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "2000b27d-9007-44ad-af8a-8a070d3d0e72",
"metadata": {},
"source": [
"# How to backtest a multi-timeframe strategy"
]
},
{
"cell_type": "markdown",
"id": "44334242-c4cd-4656-987b-7e407ab99c76",
"metadata": {},
"source": [
"Multi-timeframe (MTF) analysis is an essential trading approach that involves analyzing an asset's price in different timeframes."
]
},
{
"cell_type": "markdown",
"id": "208ebc3d-f05a-4f28-95cb-3a26c89583cd",
"metadata": {},
"source": [
"Despite its popularity, MTF analysis comes with several pitfalls when working with arrays, including look-ahead bias and information loss."
]
},
{
"cell_type": "markdown",
"id": "0a915599-672d-4609-b800-c3b32c38bcdd",
"metadata": {},
"source": [
"Many native pandas implementations mistakenly assume that events, such as indicator calculations, take place at the same timestamp as the data provided by the exchange, which is typically the opening time of a bar."
]
},
{
"cell_type": "markdown",
"id": "b0a48b90-2dbe-4f1a-8924-6cddb8ba9e8c",
"metadata": {},
"source": [
"VBT operates under the assumption that the exact timing of most events is unknown, occurring at some point between the opening (best-case) and closing (worst-case) times of a bar. Consequently, VBT employs a set of features designed to resample data in the most sensible way, without looking into the future."
]
},
{
"cell_type": "markdown",
"id": "e964afe5-96c3-4048-96b8-81ed67bdd138",
"metadata": {},
"source": [
"In today's newsletter, we'll use VectorBT PRO to backtest trading on multiple timeframes simultaneously."
]
},
{
"cell_type": "markdown",
"id": "f759738c-4e8e-471b-b668-65962de10b2f",
"metadata": {},
"source": [
"## Imports and set up"
]
},
{
"cell_type": "markdown",
"id": "4b057b37-ca17-4430-bec6-58181442610c",
"metadata": {},
"source": [
"In the newer versions of VBT PRO, the star-import (*) loads all the relevant stuff for us, such as `np` for NumPy."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "369e2d5c-1daa-4c54-ac86-ab91455b6f75",
"metadata": {},
"outputs": [],
"source": [
"from vectorbtpro import *"
]
},
{
"cell_type": "markdown",
"id": "efd90f21-1fa5-4c34-b903-73ec2fb5e163",
"metadata": {},
"source": [
"Configure our graphs to be dark and gap-free."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "765bee33-e30a-4dde-ae59-40c0ab5b5b3d",
"metadata": {},
"outputs": [],
"source": [
"vbt.settings.set_theme(\"dark\")\n",
"vbt.settings.plotting.auto_rangebreaks = True"
]
},
{
"cell_type": "markdown",
"id": "dacf0e6a-061c-4c45-9a2d-edb919cce63e",
"metadata": {},
"source": [
"Grab higher-frequency data for your favorite asset. We'll use hourly TSLA."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "16bf34cc-5386-44fa-9af1-7a8969d3f4b9",
"metadata": {},
"outputs": [],
"source": [
"data = vbt.YFData.pull(\"TSLA\", start=\"2023\", end=\"2024\", timeframe=\"hourly\")"
]
},
{
"cell_type": "markdown",
"id": "4681de1a-5101-4df7-90cf-ceffa53a6112",
"metadata": {},
"source": [
"## Multi-timeframe indicators"
]
},
{
"cell_type": "markdown",
"id": "8c7c88cb-43dd-459f-ac82-5a5614e26dd2",
"metadata": {},
"source": [
"Instruct VBT to calculate the fast and slow SMA indicators across multiple timeframes."
]
},
{
"cell_type": "markdown",
"id": "ad633ac3-6820-4d64-ba6f-8a480f3eaeb8",
"metadata": {},
"source": [
"Under the hood, data is first resampled to the target timeframe; then, the actual TA-Lib indicator is applied exclusively to non-missing values. Finally, the result is realigned back to the original timeframe in a manner that eliminates the possibility of look-ahead bias."
]
},
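{
"cell_type": "markdown",
"id": "0f1e2d3c-4b5a-4678-9abc-0123456789ab",
"metadata": {},
"source": [
"Before calling VBT, here is a rough plain-pandas sketch of why that final realignment step matters. This is only an illustration of the pitfall and the fix, not VBT's actual implementation."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1a2b3c4d-5e6f-4a8b-9c0d-1e2f3a4b5c6d",
"metadata": {},
"outputs": [],
"source": [
"# Plain-pandas sketch (not VBT's internal code) of the look-ahead pitfall\n",
"hourly_close = data.close\n",
"\n",
"# Resample to daily bars and compute a 20-day SMA\n",
"daily_sma = hourly_close.resample(\"1D\").last().dropna().rolling(20).mean()\n",
"\n",
"# Naive: forward-filling from the day's opening timestamp makes a value that\n",
"# depends on the day's close visible to every hour of that same day\n",
"naive = daily_sma.reindex(hourly_close.index, method=\"ffill\")\n",
"\n",
"# Safer: shift by one day so each hourly bar only sees fully completed days\n",
"safe = daily_sma.shift(1).reindex(hourly_close.index, method=\"ffill\")\n",
"\n",
"print(pd.concat({\"naive\": naive, \"safe\": safe}, axis=1).dropna().head())"
]
},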
{
"cell_type": "code",
"execution_count": null,
"id": "e6780b81-129f-40b3-8708-679c0fc91f29",
"metadata": {},
"outputs": [],
"source": [
"fast_sma = data.run(\n",
" \"talib:sma\", \n",
" timeframe=[\"1h\", \"4h\", \"1d\"], \n",
" timeperiod=vbt.Default(20),\n",
" skipna=True\n",
")\n",
"slow_sma = data.run(\n",
" \"talib:sma\", \n",
" timeframe=[\"1h\", \"4h\", \"1d\"], \n",
" timeperiod=vbt.Default(50),\n",
" skipna=True\n",
")"
]
},
{
"cell_type": "markdown",
"id": "d6ad5284-3fc3-4340-b925-28ac252bde69",
"metadata": {},
"source": [
"The result of each call is a DataFrame with three columns, one for each timeframe."
]
},
{
"cell_type": "markdown",
"id": "f855ae03-5539-4d4d-8bd3-fe2709a33258",
"metadata": {},
"source": [
"If we plot the DataFrame, we'll observe that the line corresponding to the highest frequency is smooth, whereas the line representing the lowest frequency appears stepped since the indicator values are updated less frequently."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ec9149ca-2c71-4b4a-97e5-6970cd3b0f44",
"metadata": {},
"outputs": [],
"source": [
"fast_sma.real.vbt.plot().show_svg()"
]
},
{
"cell_type": "markdown",
"id": "4ac4ea89-6cc9-48e6-91a6-522fd4922279",
"metadata": {},
"source": [
"## Unified portfolio"
]
},
{
"cell_type": "markdown",
"id": "015840e4-cd91-4fb3-97cc-389d39cf9e9d",
"metadata": {},
"source": [
"Next, we'll set up a portfolio in which we go long whenever the fast SMA crosses above the slow SMA and go short when the opposite occurs, across each timeframe."
]
},
{
"cell_type": "markdown",
"id": "6d998b85-66d7-4149-8160-ee5fff38dc6f",
"metadata": {},
"source": [
"However, since hourly signals occur more frequently than daily signals, we'll allocate less capital to more frequent signals. For instance, we'll allocate 5% of the equity to hourly signals, 10% to 4-hour signals, and 20% to daily signals."
]
},
{
"cell_type": "markdown",
"id": "9aa20269-769d-48ec-ba70-da1c73b508bb",
"metadata": {},
"source": [
"We'll begin with a cash balance of $10,000, shared across all timeframes. Additionally, we'll implement a 20% trailing stop loss (TSL)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8a8eb991-51de-46de-8650-06e1d1c141d2",
"metadata": {},
"outputs": [],
"source": [
"pf = vbt.PF.from_signals(\n",
" data, \n",
" long_entries=fast_sma.real_crossed_above(slow_sma), \n",
" short_entries=fast_sma.real_crossed_below(slow_sma), \n",
" size=[[0.05, 0.1, 0.2]],\n",
" size_type=\"valuepercent\",\n",
" init_cash=10_000,\n",
" group_by=[\"pf\", \"pf\", \"pf\"],\n",
" cash_sharing=True,\n",
" tsl_stop=0.2\n",
")"
]
},
{
"cell_type": "markdown",
"id": "d613367f-2eab-4b8e-8fb0-3ae9b4c278e6",
"metadata": {},
"source": [
"Plot the cumulative return for each timeframe and compare these to the cumulative return of the entire portfolio."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d39b31f1-3f88-458b-ac20-88ed7c383bdb",
"metadata": {},
"outputs": [],
"source": [
"fig = pf.get_cumulative_returns().vbt.plot(trace_kwargs=dict(line_color=\"gray\", line_dash=\"dot\"))\n",
"fig = pf.get_cumulative_returns(group_by=False).vbt.plot(fig=fig)\n",
"fig.show_svg()"
]
},
{
"cell_type": "markdown",
"id": "9ab33146-d1ac-48ad-bd0b-948c67342dd6",
"metadata": {},
"source": [
"To delve deeper into one of the timeframes, we can plot the indicators alongside the executed trade signals."
]
},
{
"cell_type": "markdown",
"id": "5c0f7a7c-8a40-4528-869a-a2ffdfd60735",
"metadata": {},
"source": [
"Here, we can observe that the majority of positions on the daily timeframe were closed out by the TSL."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c0dbd82e-1b70-4373-904f-a023d9983782",
"metadata": {},
"outputs": [],
"source": [
"fig = fast_sma.real.vbt.plot(column=\"1d\", trace_kwargs=dict(name=\"Fast\", line_color=\"limegreen\"))\n",
"fig = slow_sma.real.vbt.plot(column=\"1d\", trace_kwargs=dict(name=\"Slow\", line_color=\"orangered\"), fig=fig)\n",
"fig = pf.plot_trade_signals(column=\"1d\", fig=fig)\n",
"fig.show_svg()"
]
},
{
"cell_type": "markdown",
"id": "9f1fd30d-a43a-4435-a64c-35eb98c40b6c",
"metadata": {},
"source": [
"## Timeframe product"
]
},
{
"cell_type": "markdown",
"id": "989f08bb-14b2-4630-84a7-126920c69918",
"metadata": {},
"source": [
"Since our MTF indicators share the same index, we can combine one timeframe with another. For instance, we can generate signals from the crossover of two timeframes and identify the pair of timeframes that yield the highest expectancy."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "05d70299-4215-49bd-82ec-d52dbc29f678",
"metadata": {},
"outputs": [],
"source": [
"fast_sma_real = fast_sma.real.vbt.rename_levels({\"sma_timeframe\": \"fast_sma_timeframe\"})\n",
"slow_sma_real = slow_sma.real.vbt.rename_levels({\"sma_timeframe\": \"slow_sma_timeframe\"})\n",
"fast_sma_real, slow_sma_real = fast_sma_real.vbt.x(slow_sma_real)\n",
"long_entries = fast_sma_real.vbt.crossed_above(slow_sma_real)\n",
"short_entries = fast_sma_real.vbt.crossed_below(slow_sma_real)\n",
"pf = vbt.PF.from_signals(data, long_entries=long_entries, short_entries=short_entries)\n",
"pf.trades.expectancy.sort_values(ascending=False)"
]
},
{
"cell_type": "markdown",
"id": "2e573717-8f4c-402b-aef6-cab06f9df044",
"metadata": {},
"source": [
"## Next steps"
]
},
{
"cell_type": "markdown",
"id": "9b042ef0-9041-4722-bde7-bea35c9ca2ec",
"metadata": {},
"source": [
"Timeframe is yet another parameter of your strategy that can be tweaked. For example, you can go to uncharted territory and test more unconventional timeframes like \"1h 30min\" to discover potentially novel insights. Similar to other parameters, timeframes should also undergo cross-validation."
]
},
{
"cell_type": "markdown",
"id": "9233c24e-59f5-4947-b1d5-5c4b5c409ebe",
"metadata": {},
"source": [
"However, unlike regular parameters, timeframes should be regarded as a distinct dimension that provides a unique perspective on your strategy."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9395716f-2f53-4e0a-a48b-205bdbe6c1fd",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}


@@ -0,0 +1,316 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "b9d33f66-67f4-483f-9613-87b3298a8fc1",
"metadata": {},
"source": [
"# How to cross-validate a parameterized trading strategy"
]
},
{
"cell_type": "markdown",
"id": "23954d08-d7cf-43ec-84b2-cd6dbbfc5c15",
"metadata": {},
"source": [
"Trading strategies often rely on parameters. Enhancing and effectively cross-validating these parameters can provide a competitive advantage in the market. However, creating a reliable cross-validation schema is challenging due to risks like look-ahead bias and other pitfalls that can lead to overestimating a strategy's performance. With [VectorBT PRO](https://vectorbt.pro/), you can easily access and implement a variety of sophisticated cross-validation methods with just a few lines of code."
]
},
{
"cell_type": "markdown",
"id": "d522552e-dd90-46de-a579-6df68154f91b",
"metadata": {},
"source": [
"## Imports and data"
]
},
{
"cell_type": "markdown",
"id": "1d482486-ce8d-4474-8623-7a08dfba157c",
"metadata": {},
"source": [
"Let's import VBT PRO and the few libraries relevant for our analysis."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "039c1b36-bb5a-4272-8887-d2ff7770184e",
"metadata": {},
"outputs": [],
"source": [
"from vectorbtpro import *\n",
"# whats_imported()\n",
"\n",
"vbt.settings.set_theme(\"dark\")"
]
},
{
"cell_type": "markdown",
"id": "f0b6912a-09c7-4c72-b381-c35135ca627f",
"metadata": {},
"source": [
"The first step involves acquiring data."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1f432668-8698-4c21-a9b4-d8c4c40972e9",
"metadata": {},
"outputs": [],
"source": [
"SYMBOL = \"AAPL\"\n",
"START = \"2010\"\n",
"END = \"now\"\n",
"TIMEFRAME = \"day\"\n",
"\n",
"data = vbt.YFData.pull(SYMBOL, start=START, end=END, timeframe=TIMEFRAME)"
]
},
{
"cell_type": "markdown",
"id": "9058618c-6395-4bbc-8e7a-b0cdc6394a85",
"metadata": {},
"source": [
"## Cross-validation schema"
]
},
{
"cell_type": "markdown",
"id": "ea50af21-ff28-4d40-870b-7fcdf6e155e1",
"metadata": {},
"source": [
"Next, we'll set up a \"splitter,\" which divides a date range into smaller segments according to a chosen schema. For instance, let's allocate 12 months for training data and another 12 months for testing data, with this cycle repeating every 3 months."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cc405ebe-ae7d-4687-8470-7d1dbf91aad6",
"metadata": {},
"outputs": [],
"source": [
"TRAIN = 12\n",
"TEST = 12\n",
"EVERY = 3\n",
"OFFSET = vbt.offset(\"M\")\n",
"\n",
"splitter = vbt.Splitter.from_ranges(\n",
" data.index, \n",
" every=EVERY * OFFSET, \n",
" lookback_period=(TRAIN + TEST) * OFFSET,\n",
" split=(\n",
" vbt.RepFunc(lambda index: index < index[0] + TRAIN * OFFSET),\n",
" vbt.RepFunc(lambda index: index >= index[0] + TRAIN * OFFSET),\n",
" ),\n",
" set_labels=[\"train\", \"test\"]\n",
")\n",
"splitter.plots().show_svg()"
]
},
{
"cell_type": "markdown",
"id": "7a749de2-6f58-4246-b308-ad64c6b03f90",
"metadata": {},
"source": [
"In the first subplot, we see that each split (or row) contains adjacent training and testing sets, progressively rolling from past to present. The second subplot illustrates the overlap of each data point across different ranges. Tip: For non-overlapping testing sets, use the setting `EVERY = TRAIN`."
]
},
{
"cell_type": "markdown",
"id": "ab6ed776-f682-43c8-a6d7-489101f82c70",
"metadata": {},
"source": [
"## Objective function"
]
},
{
"cell_type": "markdown",
"id": "9ea17ccf-77e0-4d6c-af87-4c2cdb788d4f",
"metadata": {},
"source": [
"Next, we'll create a function to execute a trading strategy within a specified date range using a single parameter set, returning one key metric. Our strategy will be a simple EMA crossover combined with an ATR trailing stop."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b6a0ac65-5336-4600-870d-dd9202ef78ec",
"metadata": {},
"outputs": [],
"source": [
"def objective(data, fast_period=10, slow_period=20, atr_period=14, atr_mult=3):\n",
" fast_ema = data.run(\"talib:ema\", fast_period, short_name=\"fast_ema\", unpack=True)\n",
" slow_ema = data.run(\"talib:ema\", slow_period, short_name=\"slow_ema\", unpack=True)\n",
" atr = data.run(\"talib:atr\", atr_period, unpack=True)\n",
" pf = vbt.PF.from_signals(\n",
" data, \n",
" entries=fast_ema.vbt.crossed_above(slow_ema), \n",
" exits=fast_ema.vbt.crossed_below(slow_ema), \n",
" tsl_stop=atr * atr_mult, \n",
" save_returns=True,\n",
" freq=TIMEFRAME\n",
" )\n",
" return pf.sharpe_ratio\n",
"\n",
"print(objective(data))"
]
},
{
"cell_type": "markdown",
"id": "dc93d52c-a4e5-4122-a8e6-51ef5d52adbb",
"metadata": {},
"source": [
"## Parameter optimization"
]
},
{
"cell_type": "markdown",
"id": "41b0f21c-e99e-416a-b2da-dcdcbf7e7fed",
"metadata": {},
"source": [
"Let's harness the power of VBT PRO! By decorating (or wrapping) our function with `parameterized`, we enable `objective` to accept a list of parameters and execute them across all combinations. We'll then further enhance the function with another decorator, `split`, which runs the strategy on each date range specified by the splitter. This approach allows us to apply our strategy across every possible date range and parameter combination, compiling the outcomes into a single Pandas Series."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e150f68a-9ce3-432f-81ee-566acdfdd2dc",
"metadata": {},
"outputs": [],
"source": [
"param_objective = vbt.parameterized(\n",
" objective,\n",
" merge_func=\"concat\",\n",
" mono_n_chunks=\"auto\", # merge parameter combinations into chunks\n",
" execute_kwargs=dict(warmup=True, engine=\"pathos\") # run chunks in parallel using Pathos\n",
")\n",
"cv_objective = vbt.split(\n",
" param_objective,\n",
" splitter=splitter, \n",
" takeable_args=[\"data\"], # select date range from data\n",
" merge_func=\"concat\", \n",
")\n",
"\n",
"sharpe_ratio = cv_objective(\n",
" data,\n",
" vbt.Param(np.arange(10, 50), condition=\"slow_period - fast_period >= 5\"),\n",
" vbt.Param(np.arange(10, 50)),\n",
" vbt.Param(np.arange(10, 50), condition=\"fast_period <= atr_period <= slow_period\"),\n",
" vbt.Param(np.arange(2, 5))\n",
")\n",
"print(sharpe_ratio)"
]
},
{
"cell_type": "markdown",
"id": "a5558952-d14d-49a3-9a2d-21152eb3bcdd",
"metadata": {},
"source": [
"We tested over 3 million combinations of date ranges and parameters in just a few minutes."
]
},
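{
"cell_type": "markdown",
"id": "2b3c4d5e-6f7a-4b9c-8d0e-2f3a4b5c6d7e",
"metadata": {},
"source": [
"As a rough sanity check of that number, we can count the parameter combinations allowed by the conditions above using plain Python; the total number of backtests is this count multiplied by the number of train and test ranges produced by the splitter."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3c4d5e6f-7a8b-4cad-9e0f-3a4b5c6d7e8f",
"metadata": {},
"outputs": [],
"source": [
"# Count parameter combinations that satisfy the conditions above (no VBT API needed)\n",
"n_param_combs = sum(\n",
"    1\n",
"    for fast_period in range(10, 50)\n",
"    for slow_period in range(10, 50)\n",
"    if slow_period - fast_period >= 5\n",
"    for atr_period in range(10, 50)\n",
"    if fast_period <= atr_period <= slow_period\n",
"    for atr_mult in range(2, 5)\n",
")\n",
"print(n_param_combs)  # combinations per date range"
]
},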
{
"cell_type": "markdown",
"id": "dbb436c2-6428-48d3-9c58-67e9293c7333",
"metadata": {},
"source": [
"## Analysis"
]
},
{
"cell_type": "markdown",
"id": "d92c269d-9641-4bf4-a86d-c415343615d5",
"metadata": {},
"source": [
"Let's find out if there's a correlation between the results of the training and testing sets."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9d52a439-cee9-4e6d-9ef6-48dd050e8c7a",
"metadata": {},
"outputs": [],
"source": [
"train_sharpe_ratio = sharpe_ratio.xs(\"train\", level=\"set\")\n",
"test_sharpe_ratio = sharpe_ratio.xs(\"test\", level=\"set\")\n",
"print(train_sharpe_ratio.corr(test_sharpe_ratio))"
]
},
{
"cell_type": "markdown",
"id": "2c2950f5-bd78-4bbc-b78d-bf3bb7f31f02",
"metadata": {},
"source": [
"The analysis indicates a weak negative correlation, bordering on no correlation at all. In other words, good performance on the training set does not carry over to the testing set; if anything, the strategy tends to perform somewhat oppositely to its results in the preceding months."
]
},
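{
"cell_type": "markdown",
"id": "4d5e6f7a-8b9c-4dbe-8f01-4b5c6d7e8f9a",
"metadata": {},
"source": [
"As a quick robustness check, we can also look at the rank correlation, which is less sensitive to outliers in the Sharpe ratios. It is computed here as the plain correlation of the ranked values, which is equivalent to the Spearman correlation."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5e6f7a8b-9c0d-4ecf-9012-5c6d7e8f9a0b",
"metadata": {},
"outputs": [],
"source": [
"# Spearman (rank) correlation, computed as the Pearson correlation of ranks\n",
"print(train_sharpe_ratio.rank().corr(test_sharpe_ratio.rank()))"
]
},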
{
"cell_type": "markdown",
"id": "f565e4ac-2cac-4d81-8b82-8f375333e1ad",
"metadata": {},
"source": [
"And here's an analysis segmented by fast and slow EMA periods. The heatmap shows the median change in the Sharpe ratio from the training set to the testing set for each period pair, where blue indicates a positive change."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "58be0727-3e27-45bf-bca9-de24674eb5ea",
"metadata": {},
"outputs": [],
"source": [
"sharpe_ratio_diff = test_sharpe_ratio - train_sharpe_ratio\n",
"sharpe_ratio_diff_median = sharpe_ratio_diff.groupby([\"fast_period\", \"slow_period\"]).median()\n",
"sharpe_ratio_diff_median.vbt.heatmap(trace_kwargs=dict(colorscale=\"RdBu\")).show_svg()"
]
},
{
"cell_type": "markdown",
"id": "c5423a9e-289e-421f-886e-753577b4ba13",
"metadata": {},
"source": [
"## Conclusion"
]
},
{
"cell_type": "markdown",
"id": "cfdc909c-6559-4eba-801c-d0792068aba2",
"metadata": {},
"source": [
"Although you might have developed a promising strategy on paper, cross-validating it is essential to confirm its consistent performance over time and to ensure it's not merely a result of random fluctuations."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c1eb4e7a-4698-4c2c-896f-1c8d18a4c3ee",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}


@@ -0,0 +1,499 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "a7f13baa-bf3a-41e2-b4f4-bef957746b6a",
"metadata": {},
"source": [
"# How to backtest chart patterns with VectorBT PRO"
]
},
{
"cell_type": "markdown",
"id": "1e5237a6-cb1e-42b2-8b74-841af2e8859a",
"metadata": {},
"source": [
"VectorBT PRO (https://vectorbt.pro/) is a proprietary Python package designed for backtesting and analyzing quantitative trading strategies. It provides a comprehensive suite of tools for every stage of an algorithmic trading workflow, including data acquisition, signal generation and analysis, portfolio optimization, strategy simulation, hyperparameter tuning, and cross-validation. These modular components empower users to flexibly customize their analysis, setting it apart from monolithic backtesting frameworks."
]
},
{
"cell_type": "markdown",
"id": "51ad2b2b-3ffa-4600-9f03-547f83d8babb",
"metadata": {},
"source": [
"One of these components is a data pattern detector that efficiently scans data using variable-length windows, assessing their similarity to a specified pattern. This process, optimized with Numba (https://numba.pydata.org/), operates on any hardware without the need for machine learning. To showcase the detector's capabilities, we will conduct backtesting on a range of patterns and their combinations on a single dataset."
]
},
{
"cell_type": "markdown",
"id": "36f9e6a9-eedf-4595-b214-2d00f02d9c90",
"metadata": {},
"source": [
"## Imports and set up"
]
},
{
"cell_type": "markdown",
"id": "33459b0c-c21f-4251-b13b-6492c9171f6c",
"metadata": {},
"source": [
"Due to VectorBT PRO's self-contained design, only minimal imports are necessary."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bc8d53d7-0290-4e6c-b760-6c9ba8a6873e",
"metadata": {},
"outputs": [],
"source": [
"from vectorbtpro import *\n",
"# whats_imported()\n",
"\n",
"vbt.settings.set_theme(\"dark\")"
]
},
{
"cell_type": "markdown",
"id": "0aec0980-6ee2-41b1-a713-4a062a823fe5",
"metadata": {},
"source": [
"VectorBT PRO features built-in data downloading from sources such as Yahoo Finance, Alpaca, Polygon, TradingView, and many more. We will perform pattern detection on hourly price data pulled from TradingView."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d12bcb35-95ba-424e-8dfc-0e9edff8df99",
"metadata": {},
"outputs": [],
"source": [
"symbols = [\n",
" \"NASDAQ:META\",\n",
" \"NASDAQ:AMZN\",\n",
" \"NASDAQ:AAPL\",\n",
" \"NASDAQ:NFLX\",\n",
" \"NASDAQ:GOOG\",\n",
"]\n",
"\n",
"data = vbt.TVData.pull(symbols, timeframe=\"hourly\")"
]
},
{
"cell_type": "markdown",
"id": "77e48d78-436d-4a52-95d4-8ff8c1e8ff4c",
"metadata": {},
"source": [
"TradingView does not offer the option to specify a date range in advance, so we will need to select it afterward."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b96e37aa-3c00-4373-8030-ca3d97f872b1",
"metadata": {},
"outputs": [],
"source": [
"start_date = \"2020\"\n",
"end_date = None\n",
"\n",
"data = data.xloc[start_date:end_date]"
]
},
{
"cell_type": "markdown",
"id": "3c9c8009-3a78-4799-bc98-2bd191e22851",
"metadata": {},
"source": [
"Ensure that our data spans the correct date period and is free of NaN values."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "76b530eb-f42e-4bdf-b270-20298a66eb6b",
"metadata": {},
"outputs": [],
"source": [
"print(data.stats())"
]
},
{
"cell_type": "markdown",
"id": "4cf31468-ce25-4284-b0c6-dec873e62268",
"metadata": {},
"source": [
"As pattern detection requires only a single time series, we must choose a suitable price feature. We'll use HLC/3, which effectively captures price fluctuations."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "650c3662-684c-4e57-b7fa-45ba8b2f7f1d",
"metadata": {},
"outputs": [],
"source": [
"price = data.hlc3"
]
},
{
"cell_type": "markdown",
"id": "74dcad43-dd64-435a-9a9d-591681514209",
"metadata": {},
"source": [
"## Define patterns"
]
},
{
"cell_type": "markdown",
"id": "f387c42a-1224-46d9-9397-b6479e6e21e7",
"metadata": {},
"source": [
"Numerous chart patterns can be translated into numerical sequences, like the \"Double Top\" pattern (https://www.investopedia.com/terms/d/doubletop.asp) represented as [1, 3, 2, 3, 1]. It's important to note that while the numbers themselves can be arbitrary, their relative spacing should mirror the relative distance between the pattern's chart points. For instance, in this sequence, 2 aligns with the midpoint between valley point 1 and peak point 3. The same principle applies to temporal distribution: points should be equidistant from one another."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "40827a2a-ee12-4feb-9f6a-4505ed24060d",
"metadata": {},
"outputs": [],
"source": [
"bullish_patterns = {\n",
" \"double_bottom\": [5, 1, 3, 1, 5],\n",
" \"exp_triangle\": [3, 4, 2, 5, 1, 6],\n",
" \"asc_triangle\": [1, 5, 2, 5, 3, 6],\n",
" \"symm_triangle\": [1, 6, 2, 5, 3, 6],\n",
" \"pennant\": [6, 1, 5, 2, 4, 3, 6]\n",
"}\n",
"bearish_patterns = {\n",
" \"head_and_shoulders\": [1, 4, 2, 6, 2, 4, 1],\n",
" \"double_top\": [1, 5, 3, 5, 1],\n",
" \"desc_triangle\": [6, 2, 5, 2, 4, 1],\n",
" \"symm_triangle\": [6, 1, 5, 2, 4, 1],\n",
" \"pennant\": [1, 6, 2, 5, 3, 4, 1]\n",
"}"
]
},
{
"cell_type": "markdown",
"id": "af76a114-d588-443a-8c62-19274c97c416",
"metadata": {},
"source": [
"Confirm the visual representation of a pattern by plotting its corresponding line graph."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0114e669-fff0-48b1-922b-412ad6941914",
"metadata": {},
"outputs": [],
"source": [
"pd.Series(bullish_patterns[\"double_bottom\"]).vbt.plot().show_svg()"
]
},
{
"cell_type": "markdown",
"id": "11172c01-2675-4c12-ab51-ae21137c097a",
"metadata": {},
"source": [
"Each generated sequence serves as a rough approximation of the desired chart pattern, and there's no need for precise adjustments: VectorBT PRO's similarity-based algorithm is flexible and can identify patterns, even if they are not perfectly consistent in their design."
]
},
{
"cell_type": "markdown",
"id": "4292665d-4168-436a-a59d-94b42bfd9482",
"metadata": {},
"source": [
"## Detect patterns in data"
]
},
{
"cell_type": "markdown",
"id": "0a355587-347a-4f4f-9f7a-fa041127f36a",
"metadata": {},
"source": [
"Iterate through each pattern, dataset, and timestamp within the dataset. Search for matches within windows spanning from 1 to 30 days, and create a record for each match that exceeds a pre-defined minimum similarity score, which is set by default to 85%."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d1a9af95-b7d1-4f29-9a6b-b40d57e5f597",
"metadata": {},
"outputs": [],
"source": [
"min_window = 24\n",
"max_window = 24 * 30\n",
"\n",
"def detect_patterns(patterns):\n",
" return vbt.PatternRanges.from_pattern_search(\n",
" price,\n",
" open=data.open, # OHLC for plotting\n",
" high=data.high,\n",
" low=data.low,\n",
" close=data.close,\n",
" pattern=patterns,\n",
" window=min_window,\n",
" max_window=max_window,\n",
" execute_kwargs=dict( # multithreading\n",
" engine=\"threadpool\", \n",
" chunk_len=\"auto\", \n",
" )\n",
" )\n",
"\n",
"bullish_matches = detect_patterns(vbt.Param(bullish_patterns, name=\"bullish_pattern\"))\n",
"bearish_matches = detect_patterns(vbt.Param(bearish_patterns, name=\"bearish_pattern\"))"
]
},
{
"cell_type": "markdown",
"id": "12733006-548c-4c28-a4ac-902aa066f0b3",
"metadata": {},
"source": [
"Within just a few minutes, VectorBT PRO detected matches for all patterns. This process, involving around 230 million unique pattern and window combinations, was executed in parallel."
]
},
{
"cell_type": "markdown",
"id": "714ddd1f-f5a5-420e-9d4d-707e4b5e4685",
"metadata": {},
"source": [
"Get the number of matches for each pattern and dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "77c5957e-a906-4c0c-998a-5b2e92fd652d",
"metadata": {},
"outputs": [],
"source": [
"print(bullish_matches.count())"
]
},
{
"cell_type": "markdown",
"id": "88b7627b-f48c-4d51-986e-cc269abf9604",
"metadata": {},
"source": [
"Plot the pattern and dataset with the most matches."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "708b0d74-8c5e-4921-87e2-4704050ce7ed",
"metadata": {},
"outputs": [],
"source": [
"vbt.settings.plotting.auto_rangebreaks = True # for stocks\n",
"\n",
"display_column = bullish_matches.count().idxmax()\n",
"\n",
"bullish_matches.plot(column=display_column, fit_ranges=True).show_svg()"
]
},
{
"cell_type": "markdown",
"id": "af6a60cf-0d98-49e5-ad48-cc872f6d2ce9",
"metadata": {},
"source": [
"Zoom in on a match."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f22c6c82-cc7c-4d5d-94b9-e14753e82072",
"metadata": {},
"outputs": [],
"source": [
"display_match = 3\n",
"\n",
"bullish_matches.plot(column=display_column, fit_ranges=display_match).show_svg()"
]
},
{
"cell_type": "markdown",
"id": "45f49c51-bd6f-4952-8ff4-76f6ebc00f7f",
"metadata": {},
"source": [
"The window data closely aligns with the pattern. This functionality is highly comprehensive, offering the flexibility to adjust fitness levels, modify rescaling and interpolation algorithms, and more to suit specific requirements."
]
},
{
"cell_type": "markdown",
"id": "16779944-3cae-44e8-a63d-36194479217c",
"metadata": {},
"source": [
"## Transform matches to signals"
]
},
{
"cell_type": "markdown",
"id": "26c9e03d-95ff-44a3-bd56-2a581673aa27",
"metadata": {},
"source": [
"To conduct backtesting on the identified patterns, we will convert them into signals, triggering a signal once a pattern has fully developed."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ce91720c-dd56-496e-aadb-faad71e1a529",
"metadata": {},
"outputs": [],
"source": [
"entries = bullish_matches.last_pd_mask\n",
"exits = bearish_matches.last_pd_mask"
]
},
{
"cell_type": "markdown",
"id": "d049c224-03b7-42fa-8927-51a502812e54",
"metadata": {},
"source": [
"Generate a Cartesian product of bullish and bearish patterns to systematically test each bullish pattern against each bearish pattern."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a5f0a9db-632d-4705-af3f-1c33dfb6f884",
"metadata": {},
"outputs": [],
"source": [
"entries, exits = entries.vbt.x(exits)"
]
},
{
"cell_type": "markdown",
"id": "90a043fe-c990-4358-94be-b8f4b92dec4f",
"metadata": {},
"source": [
"Both arrays have been converted into equally-shaped DataFrames, each comprising 125 columns. Each column represents an individual backtest, encompassing three parameters: bullish pattern, bearish pattern, and symbol."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b3a1466-246b-42b5-a61d-a0ae1137c54d",
"metadata": {},
"outputs": [],
"source": [
"print(entries.columns)"
]
},
{
"cell_type": "markdown",
"id": "05669332-15a4-4ac5-b376-bdc08006d952",
"metadata": {},
"source": [
"## Backtest signals"
]
},
{
"cell_type": "markdown",
"id": "a44e90d0-f172-445a-9f4b-865444ae0cb3",
"metadata": {},
"source": [
"Establish a portfolio by simulating signals."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5d3ec70d-73e4-407d-8ea7-2a6b0f4436ba",
"metadata": {},
"outputs": [],
"source": [
"pf = vbt.Portfolio.from_signals(data, entries, exits)"
]
},
{
"cell_type": "markdown",
"id": "ff7d821d-f20e-45c3-83d5-ad1aa2ba109b",
"metadata": {},
"source": [
"Get the mean total return for every combination of bullish and bearish patterns."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ad8b789d-c543-4e42-8df7-1a351cceda5f",
"metadata": {},
"outputs": [],
"source": [
"mean_total_return = pf.total_return.groupby([\"bullish_pattern\", \"bearish_pattern\"]).mean()\n",
"\n",
"print(mean_total_return)"
]
},
{
"cell_type": "markdown",
"id": "946aa00a-b183-496e-8a63-7f11485ad3dc",
"metadata": {},
"source": [
"As visual beings, let's represent these values as a heatmap."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d74ac965-0461-4563-813e-56b9cce979c8",
"metadata": {},
"outputs": [],
"source": [
"mean_total_return.vbt.heatmap(x_level=\"bearish_pattern\", y_level=\"bullish_pattern\").show_svg()"
]
},
{
"cell_type": "markdown",
"id": "5d9f9706-d3da-480e-8b65-5eaa47196049",
"metadata": {},
"source": [
"Although the displayed performance of each pattern combination does not guarantee future results, it provides insight into how the market responded to pattern events in the past. For instance, it's noteworthy that the \"Bearish Symmetrical Triangle\" exhibited a notably bullish trend. Cross-validation and robustness testing are next essential steps for a comprehensive assessment."
]
},
{
"cell_type": "markdown",
"id": "2b5b8516-8620-41aa-a11c-96b48798c343",
"metadata": {},
"source": [
"Read more at https://vectorbt.pro/tutorials/patterns-and-projections/"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9efd6597-880f-4769-a486-65e17b1c5475",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}


@@ -0,0 +1,266 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "9d9b5c91-f3a3-4709-a36f-40ecc86595d6",
"metadata": {},
"source": [
"# Forecasting future price trends by projecting historical price patterns"
]
},
{
"cell_type": "markdown",
"id": "2cffb873-e431-44f3-b243-e35969bbd2c1",
"metadata": {},
"source": [
"In our previous newsletter focusing on VectorBT PRO (VBT), we dived into the pattern detection capabilities of this powerful library. An additional key functionality is VBT's capacity to extrapolate identified price segments into the future and aggregate them for statistical analysis. This feature can be an invaluable tool for real-time decision-making in market analysis."
]
},
{
"cell_type": "markdown",
"id": "c472968b-1863-4d79-a299-ec67c1757455",
"metadata": {},
"source": [
"## Imports and set up"
]
},
{
"cell_type": "markdown",
"id": "ddf68612-622b-4803-87fc-a1ad80341536",
"metadata": {},
"source": [
"Given the self-contained design of VBT, a single import suffices."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a42ccb91-bc73-4ad5-9327-18c7c22af598",
"metadata": {},
"outputs": [],
"source": [
"from vectorbtpro import *\n",
"# whats_imported()\n",
"\n",
"vbt.settings.set_theme(\"dark\")"
]
},
{
"cell_type": "markdown",
"id": "15412fda-c27f-4820-9273-17366164b2b3",
"metadata": {},
"source": [
"Let's define a set of variables for our analysis."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fc016fe0-5ae6-416f-bb4d-84a33a91fce8",
"metadata": {},
"outputs": [],
"source": [
"SYMBOL = \"BTCUSDT\"\n",
"TIMEFRAME = \"1 hour\"\n",
"START = \"one year ago\"\n",
"\n",
"LAST_N_BARS = 24\n",
"PRED_N_BARS = 12\n",
"\n",
"GIF_FNAME = \"projections.gif\"\n",
"GIF_N_BARS = 72\n",
"GIF_FPS = 4\n",
"GIF_PAD = 0.01"
]
},
{
"cell_type": "markdown",
"id": "e4667d70-f1d9-4f34-81ff-fdf8320477ae",
"metadata": {},
"source": [
"We will execute the analysis using price data pulled from Binance via `vbt.BinanceData`, based on the parameters we previously defined."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b797e0ff-320d-456b-91df-1e0e369d83a9",
"metadata": {},
"outputs": [],
"source": [
"data = vbt.BinanceData.pull(SYMBOL, timeframe=TIMEFRAME, start=START)"
]
},
{
"cell_type": "markdown",
"id": "43fade8d-2d1f-492b-88bb-95facd21ceda",
"metadata": {},
"source": [
"## Find and plot projections"
]
},
{
"cell_type": "markdown",
"id": "0013fab2-d1fa-4777-99e9-2081a90444e3",
"metadata": {},
"source": [
"Let's write a function that analyzes the most recent price trend and employs it as a pattern to identify similar price movements in historical data. This pattern recognition function will focus exclusively on segments of price history having a comparable percentage change from their respective starting points."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f7f4ead3-c4db-47d5-8a30-3f7dbe4347dc",
"metadata": {},
"outputs": [],
"source": [
"def find_patterns(data):\n",
" price = data.hlc3\n",
" pattern = price.values[-LAST_N_BARS:]\n",
" pattern_ranges = price.vbt.find_pattern(\n",
" pattern=pattern,\n",
" rescale_mode=\"rebase\",\n",
" overlap_mode=\"allow\",\n",
" wrapper_kwargs=dict(freq=TIMEFRAME)\n",
" )\n",
" pattern_ranges = pattern_ranges.status_closed\n",
" return pattern_ranges\n",
"\n",
"pattern_ranges = find_patterns(data)\n",
"print(pattern_ranges.count())"
]
},
{
"cell_type": "markdown",
"id": "6dc1f00c-f0a2-4b74-831f-3043c14f1195",
"metadata": {},
"source": [
"We have identified a number of price segments that closely resemble the latest price trend. Now, we'll write a function that extracts the price data immediately succeeding each identified segment and plots these as extensions of the price trend. These subsequent segments are known as \"projections.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9fb7b02c-190a-488e-bfa6-843db23c324e",
"metadata": {},
"outputs": [],
"source": [
"def plot_projections(data, pattern_ranges, **kwargs):\n",
" projection_ranges = pattern_ranges.with_delta(\n",
" PRED_N_BARS,\n",
" open=data.open,\n",
" high=data.high,\n",
" low=data.low,\n",
" close=data.close,\n",
" )\n",
" projection_ranges = projection_ranges.status_closed\n",
" return projection_ranges.plot_projections(\n",
" plot_past_period=LAST_N_BARS, \n",
" **kwargs,\n",
" )\n",
"\n",
"plot_projections(data, pattern_ranges, plot_bands=False).show_svg()"
]
},
{
"cell_type": "markdown",
"id": "8df73436-c6ae-411b-8c44-e5764f9c1812",
"metadata": {},
"source": [
"As we can see, similar price movements have historically branched into a diverse set of trajectories. For a visually compelling and statistically robust forecast, we will display the confidence bands encompassing all the projections, with 60% of these projections falling between the upper and lower bands."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b97458a5-7428-4877-80c6-a522aef4b5ce",
"metadata": {},
"outputs": [],
"source": [
"plot_projections(data, pattern_ranges, plot_bands=True).show_svg()"
]
},
{
"cell_type": "markdown",
"id": "9011e2c5-1745-480c-b9da-c031f6ba9ae2",
"metadata": {},
"source": [
"## Generate animation"
]
},
{
"cell_type": "markdown",
"id": "ac05a0ea-6883-4736-a815-619f76607966",
"metadata": {},
"source": [
"Lastly, we will compile a GIF animation that iterates through a specified range of bars, applying the aforementioned procedure to each bar within that range."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6238530e-9d06-4da4-a71d-3ae7489c2c9a",
"metadata": {},
"outputs": [],
"source": [
"def plot_frame(frame_index, **kwargs):\n",
" sub_data = data.loc[:frame_index[-1]]\n",
" pattern_ranges = find_patterns(sub_data)\n",
" if pattern_ranges.count() < 3:\n",
" return None\n",
" return plot_projections(sub_data, pattern_ranges, **kwargs)\n",
"\n",
"vbt.save_animation(\n",
" GIF_FNAME,\n",
" data.index[-GIF_N_BARS:],\n",
" plot_frame,\n",
" plot_projections=False,\n",
" delta=1,\n",
" fps=GIF_FPS,\n",
" writer_kwargs=dict(loop=0),\n",
" yaxis_range=[\n",
" data.low.iloc[-GIF_N_BARS:].min() * (1 - GIF_PAD), \n",
" data.high.iloc[-GIF_N_BARS:].max() * (1 + GIF_PAD)\n",
" ],\n",
")"
]
},
{
"cell_type": "markdown",
"id": "91b825fb-7e4c-4d48-ae73-bffe633a6f52",
"metadata": {},
"source": [
"Bear in mind that while the confidence bands describe past performance, they should not be used as guarantees of future results."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "319a24bb-e210-4d02-ab2c-0ce58b3dc82c",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,771 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Notebook for the article [Stop Loss, Trailing Stop, or Take Profit? 2 Million Backtests Shed Light](https://polakowo.medium.com/stop-loss-trailing-stop-or-take-profit-2-million-backtests-shed-light-dde23bda40be)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from vectorbtpro import *\n",
"# whats_imported()\n",
"\n",
"import ipywidgets\n",
"\n",
"vbt.settings.set_theme('dark')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"seed = 42\n",
"symbols = [\n",
" \"BTC-USD\", \"ETH-USD\", \"XRP-USD\", \"BCH-USD\", \"LTC-USD\", \n",
" \"BNB-USD\", \"EOS-USD\", \"XLM-USD\", \"XMR-USD\", \"ADA-USD\"\n",
"]\n",
"start_date = vbt.utc_timestamp(\"2018-01-01\")\n",
"end_date = vbt.utc_timestamp(\"2021-01-01\")\n",
"time_delta = end_date - start_date\n",
"window_len = vbt.timedelta(\"180d\")\n",
"window_cnt = 400\n",
"exit_types = [\"SL\", \"TS\", \"TP\", \"Random\", \"Holding\"]\n",
"step = 0.01\n",
"stops = np.arange(step, 1 + step, step)\n",
"\n",
"vbt.settings.wrapping[\"freq\"] = \"d\"\n",
"vbt.settings.plotting[\"layout\"][\"template\"] = \"vbt_dark\"\n",
"vbt.settings.portfolio[\"init_cash\"] = 100.\n",
"\n",
"print(pd.Series({\n",
" \"Start date\": start_date,\n",
" \"End date\": end_date,\n",
" \"Time period (days)\": time_delta.days,\n",
" \"Assets\": len(symbols),\n",
" \"Window length\": window_len,\n",
" \"Windows\": window_cnt,\n",
" \"Exit types\": len(exit_types),\n",
" \"Stop values\": len(stops),\n",
" \"Tests per asset\": window_cnt * len(stops) * len(exit_types),\n",
" \"Tests per window\": len(symbols) * len(stops) * len(exit_types),\n",
" \"Tests per exit type\": len(symbols) * window_cnt * len(stops),\n",
" \"Tests per stop type and value\": len(symbols) * window_cnt,\n",
" \"Tests total\": len(symbols) * window_cnt * len(stops) * len(exit_types)\n",
"}))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cols = [\"Open\", \"Low\", \"High\", \"Close\", \"Volume\"]\n",
"yfdata = vbt.YFData.pull(symbols, start=start_date, end=end_date)\n",
"\n",
"print(yfdata.data.keys())\n",
"print(yfdata.data[\"BTC-USD\"].shape)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"yfdata.plot(symbol=\"BTC-USD\").show_svg()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ohlcv = yfdata.concat()\n",
"\n",
"print(ohlcv.keys())\n",
"print(ohlcv[\"Open\"].shape)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"splitter = vbt.Splitter.from_n_rolling(\n",
" ohlcv[\"Open\"].index, \n",
" n=window_cnt,\n",
" length=window_len.days\n",
")\n",
"\n",
"split_ohlcv = {}\n",
"for k, v in ohlcv.items():\n",
" split_ohlcv[k] = splitter.take(v, into=\"reset_stacked\")\n",
"print(split_ohlcv[\"Open\"].shape)\n",
"\n",
"split_indexes = splitter.take(ohlcv[\"Open\"].index)\n",
"print(split_indexes)\n",
"print(split_indexes[10])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(split_ohlcv[\"Open\"].columns)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"entries = pd.DataFrame.vbt.signals.empty_like(split_ohlcv[\"Open\"])\n",
"entries.iloc[0, :] = True\n",
"\n",
"print(entries.shape)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# We use OHLCSTX instead of built-in stop-loss in Portfolio.from_signals\n",
"# because we want to analyze signals before simulation + it's easier to construct param grids\n",
"# For reality check, run the same setup using Portfolio.from_signals alone\n",
"\n",
"sl_ohlcstx = vbt.OHLCSTX.run(\n",
" entries, \n",
" entry_price=split_ohlcv[\"Close\"], \n",
" open=split_ohlcv[\"Open\"], \n",
" high=split_ohlcv[\"High\"], \n",
" low=split_ohlcv[\"Low\"], \n",
" close=split_ohlcv[\"Close\"], \n",
" sl_stop=list(stops),\n",
" stop_type=None\n",
")\n",
"sl_exits = sl_ohlcstx.exits.copy()\n",
"sl_price = sl_ohlcstx.close.copy()\n",
"sl_price[sl_exits] = sl_ohlcstx.stop_price\n",
"del sl_ohlcstx\n",
"\n",
"print(sl_exits.shape)"
]
},
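{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a reality check, the same stop-loss sweep can also be handed to the simulator directly. The next cell is a minimal sketch, assuming that `Portfolio.from_signals` accepts `sl_stop` as a parameter grid via `vbt.Param` together with the OHLC columns; it covers only the SL case and should yield results comparable to the OHLCSTX-based pipeline."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Reality-check sketch: let the simulator resolve the stop-loss itself (SL case only).\n",
"# Assumes sl_stop accepts a vbt.Param grid; note this builds ~400k columns at once.\n",
"check_pf = vbt.Portfolio.from_signals(\n",
"    split_ohlcv[\"Close\"],\n",
"    entries,\n",
"    open=split_ohlcv[\"Open\"],\n",
"    high=split_ohlcv[\"High\"],\n",
"    low=split_ohlcv[\"Low\"],\n",
"    sl_stop=vbt.Param(stops)\n",
")\n",
"\n",
"print(check_pf.total_return.shape)"
]
},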
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tsl_ohlcstx = vbt.OHLCSTX.run(\n",
" entries, \n",
" entry_price=split_ohlcv[\"Close\"], \n",
" open=split_ohlcv[\"Open\"], \n",
" high=split_ohlcv[\"High\"], \n",
" low=split_ohlcv[\"Low\"], \n",
" close=split_ohlcv[\"Close\"], \n",
" tsl_stop=list(stops),\n",
" stop_type=None\n",
")\n",
"tsl_exits = tsl_ohlcstx.exits.copy()\n",
"tsl_price = tsl_ohlcstx.close.copy()\n",
"tsl_price[tsl_exits] = tsl_ohlcstx.stop_price\n",
"del tsl_ohlcstx\n",
"\n",
"print(tsl_exits.shape)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tp_ohlcstx = vbt.OHLCSTX.run(\n",
" entries, \n",
" entry_price=split_ohlcv[\"Close\"], \n",
" open=split_ohlcv[\"Open\"], \n",
" high=split_ohlcv[\"High\"], \n",
" low=split_ohlcv[\"Low\"], \n",
" close=split_ohlcv[\"Close\"], \n",
" tp_stop=list(stops),\n",
" stop_type=None\n",
")\n",
"tp_exits = tp_ohlcstx.exits.copy()\n",
"tp_price = tp_ohlcstx.close.copy()\n",
"tp_price[tp_exits] = tp_ohlcstx.stop_price\n",
"del tp_ohlcstx\n",
"\n",
"print(tp_exits.shape)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def rename_stop_level(df):\n",
" return df.vbt.rename_levels({\n",
" \"ohlcstx_sl_stop\": \"stop_value\",\n",
" \"ohlcstx_tsl_stop\": \"stop_value\",\n",
" \"ohlcstx_tp_stop\": \"stop_value\"\n",
" }, strict=False)\n",
"\n",
"sl_exits = rename_stop_level(sl_exits)\n",
"tsl_exits = rename_stop_level(tsl_exits)\n",
"tp_exits = rename_stop_level(tp_exits)\n",
"\n",
"sl_price = rename_stop_level(sl_price)\n",
"tsl_price = rename_stop_level(tsl_price)\n",
"tp_price = rename_stop_level(tp_price)\n",
"\n",
"print(sl_exits.columns)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(pd.Series({\n",
" \"SL\": sl_exits.vbt.signals.total().mean(),\n",
" \"TS\": tsl_exits.vbt.signals.total().mean(),\n",
" \"TP\": tp_exits.vbt.signals.total().mean()\n",
"}, name=\"avg_num_signals\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def groupby_stop_value(df):\n",
" return df.vbt.signals.total().groupby(\"stop_value\").mean()\n",
"\n",
"pd.DataFrame({\n",
" \"Stop Loss\": groupby_stop_value(sl_exits),\n",
" \"Trailing Stop\": groupby_stop_value(tsl_exits),\n",
" \"Take Profit\": groupby_stop_value(tp_exits)\n",
"}).vbt.plot(\n",
" xaxis_title=\"Stop value\", \n",
" yaxis_title=\"Avg number of signals\"\n",
").show_svg()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sl_exits.iloc[-1, :] = True\n",
"tsl_exits.iloc[-1, :] = True\n",
"tp_exits.iloc[-1, :] = True\n",
"\n",
"sl_exits = sl_exits.vbt.signals.first_after(entries)\n",
"tsl_exits = tsl_exits.vbt.signals.first_after(entries)\n",
"tp_exits = tp_exits.vbt.signals.first_after(entries)\n",
"\n",
"print(pd.Series({\n",
" \"SL\": sl_exits.vbt.signals.total().mean(),\n",
" \"TS\": tsl_exits.vbt.signals.total().mean(),\n",
" \"TP\": tp_exits.vbt.signals.total().mean()\n",
"}, name=\"avg_num_signals\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"hold_exits = pd.DataFrame.vbt.signals.empty_like(sl_exits)\n",
"hold_exits.iloc[-1, :] = True\n",
"hold_price = vbt.broadcast_to(split_ohlcv[\"Close\"], sl_price)\n",
"\n",
"print(hold_exits.shape)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"rand_exits = hold_exits.vbt.shuffle(seed=seed)\n",
"rand_price = hold_price\n",
"\n",
"print(rand_exits.shape)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"exits = pd.DataFrame.vbt.concat(\n",
" sl_exits, \n",
" tsl_exits, \n",
" tp_exits, \n",
" rand_exits, \n",
" hold_exits, \n",
" keys=pd.Index(exit_types, name=\"exit_type\")\n",
")\n",
"del sl_exits\n",
"del tsl_exits\n",
"del tp_exits\n",
"del rand_exits\n",
"del hold_exits\n",
"\n",
"print(exits.shape)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"price = pd.DataFrame.vbt.concat(\n",
" sl_price, \n",
" tsl_price, \n",
" tp_price, \n",
" rand_price, \n",
" hold_price, \n",
" keys=pd.Index(exit_types, name=\"exit_type\")\n",
")\n",
"del sl_price\n",
"del tsl_price\n",
"del tp_price\n",
"del rand_price\n",
"del hold_price\n",
"\n",
"print(price.shape)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(exits.columns)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(exits.vbt.getsize())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(price.vbt.getsize())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"avg_distance = entries.vbt.signals.between_ranges(target=exits)\\\n",
" .duration.mean()\\\n",
" .groupby([\"exit_type\", \"stop_value\"])\\\n",
" .mean()\\\n",
" .unstack(level=\"exit_type\")\n",
"\n",
"print(avg_distance.mean())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"avg_distance[exit_types].vbt.plot(\n",
" xaxis_title=\"Stop value\", \n",
" yaxis_title=\"Avg distance to entry\"\n",
").show_svg()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"pf = vbt.Portfolio.from_signals(\n",
" split_ohlcv[\"Close\"], \n",
" entries, \n",
" exits, \n",
" price=price\n",
")\n",
"\n",
"print(len(pf.orders))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"total_return = pf.total_return\n",
"del pf\n",
"\n",
"print(total_return.shape)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import gc\n",
"\n",
"total_returns = []\n",
"for i in vbt.ProgressBar(range(len(exit_types))):\n",
" chunk_mask = exits.columns.get_level_values(\"exit_type\") == exit_types[i]\n",
" chunk_pf = vbt.Portfolio.from_signals(\n",
" split_ohlcv[\"Close\"], \n",
" entries, \n",
" exits.loc[:, chunk_mask],\n",
" price=price.loc[:, chunk_mask]\n",
" )\n",
" total_returns.append(chunk_pf.total_return)\n",
" \n",
" del chunk_pf\n",
" gc.collect()\n",
" \n",
"total_return = pd.concat(total_returns)\n",
"\n",
"print(total_return.shape)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"return_by_type = total_return.unstack(level=\"exit_type\")[exit_types]\n",
"\n",
"print(return_by_type[\"Holding\"].describe(percentiles=[]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"purple_color = vbt.settings[\"plotting\"][\"color_schema\"][\"purple\"]\n",
"return_by_type[\"Holding\"].vbt.histplot(\n",
" xaxis_title=\"Total return\",\n",
" xaxis_tickformat=\".2%\",\n",
" yaxis_title=\"Count\",\n",
" trace_kwargs=dict(marker_color=purple_color)\n",
").show_svg()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(pd.DataFrame({\n",
" \"Mean\": return_by_type.mean(),\n",
" \"Median\": return_by_type.median(),\n",
" \"Std\": return_by_type.std(),\n",
"}))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"return_by_type.vbt.boxplot(\n",
" trace_kwargs=dict(boxpoints=False),\n",
" yaxis_title=\"Total return\",\n",
" yaxis_tickformat=\".2%\"\n",
").show_svg()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print((return_by_type > 0).mean().rename(\"win_rate\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"init_cash = vbt.settings.portfolio[\"init_cash\"]\n",
"\n",
"def get_expectancy(return_by_type, level_name):\n",
" grouped = return_by_type.groupby(level_name, axis=0)\n",
" win_rate = grouped.apply(lambda x: (x > 0).mean())\n",
" avg_win = grouped.apply(lambda x: init_cash * x[x > 0].mean())\n",
" avg_win = avg_win.fillna(0)\n",
" avg_loss = grouped.apply(lambda x: init_cash * x[x < 0].mean())\n",
" avg_loss = avg_loss.fillna(0)\n",
" return win_rate * avg_win - (1 - win_rate) * np.abs(avg_loss)\n",
" \n",
"expectancy_by_stop = get_expectancy(return_by_type, \"stop_value\")\n",
"\n",
"print(expectancy_by_stop.mean())"
]
},
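{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, the expectancy computed above follows the usual definition, with wins and losses measured in cash (returns scaled by `init_cash`):\n",
"\n",
"$$\\text{Expectancy} = p_{\\text{win}} \\cdot \\overline{W} - (1 - p_{\\text{win}}) \\cdot |\\overline{L}|$$\n",
"\n",
"where $p_{\\text{win}}$ is the share of positive returns, $\\overline{W}$ is the average winning PnL, and $\\overline{L}$ is the average losing PnL within each group."
]
},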
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"expectancy_by_stop.vbt.plot(\n",
" xaxis_title=\"Stop value\", \n",
" yaxis_title=\"Expectancy\"\n",
").show_svg()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"return_values = np.sort(return_by_type[\"Holding\"].values)\n",
"idxs = np.ceil(np.linspace(0, len(return_values) - 1, 21)).astype(int)\n",
"bins = return_values[idxs][:-1]\n",
"\n",
"def bin_return(return_by_type):\n",
" classes = pd.cut(return_by_type[\"Holding\"], bins=bins, right=True)\n",
" new_level = np.array(classes.apply(lambda x: x.right))\n",
" new_level = pd.Index(new_level, name=\"bin_right\")\n",
" return return_by_type.vbt.add_levels(new_level, axis=0)\n",
"\n",
"binned_return_by_type = bin_return(return_by_type)\n",
"\n",
"expectancy_by_bin = get_expectancy(binned_return_by_type, \"bin_right\")\n",
"\n",
"expectancy_by_bin.vbt.plot(\n",
" trace_kwargs=dict(mode=\"lines\"),\n",
" xaxis_title=\"Total return of holding\",\n",
" xaxis_tickformat=\".2%\",\n",
" yaxis_title=\"Expectancy\"\n",
").show_svg()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"range_starts = pd.DatetimeIndex(list(map(lambda x: x[0], split_indexes)))\n",
"range_ends = pd.DatetimeIndex(list(map(lambda x: x[-1], split_indexes)))\n",
"\n",
"symbol_lvl = return_by_type.index.get_level_values(\"symbol\")\n",
"split_lvl = return_by_type.index.get_level_values(\"split\")\n",
"range_start_lvl = range_starts[split_lvl]\n",
"range_end_lvl = range_ends[split_lvl]\n",
"\n",
"asset_multi_select = ipywidgets.SelectMultiple(\n",
" options=symbols,\n",
" value=symbols,\n",
" rows=len(symbols),\n",
" description=\"Symbols\"\n",
")\n",
"dates = np.unique(yfdata.wrapper.index)\n",
"date_range_slider = ipywidgets.SelectionRangeSlider(\n",
" options=dates,\n",
" index=(0, len(dates)-1),\n",
" orientation=\"horizontal\",\n",
" readout=False,\n",
" continuous_update=False\n",
")\n",
"range_start_label = ipywidgets.Label()\n",
"range_end_label = ipywidgets.Label()\n",
"metric_dropdown = ipywidgets.Dropdown(\n",
" options=[\"Mean\", \"Median\", \"Win Rate\", \"Expectancy\"],\n",
" value=\"Expectancy\"\n",
")\n",
"stop_scatter = vbt.Scatter(\n",
" trace_names=exit_types,\n",
" x_labels=stops, \n",
" xaxis_title=\"Stop value\", \n",
" yaxis_title=\"Expectancy\"\n",
")\n",
"stop_scatter_img = ipywidgets.Image(\n",
" format=\"png\",\n",
" width=stop_scatter.fig.layout.width,\n",
" height=stop_scatter.fig.layout.height\n",
")\n",
"bin_scatter = vbt.Scatter(\n",
" trace_names=exit_types,\n",
" x_labels=expectancy_by_bin.index, \n",
" trace_kwargs=dict(mode=\"lines\"),\n",
" xaxis_title=\"Total return of holding\",\n",
" xaxis_tickformat=\"%\",\n",
" yaxis_title=\"Expectancy\"\n",
")\n",
"bin_scatter_img = ipywidgets.Image(\n",
" format=\"png\",\n",
" width=bin_scatter.fig.layout.width,\n",
" height=bin_scatter.fig.layout.height\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def update_scatter(*args, **kwargs):\n",
" _symbols = asset_multi_select.value\n",
" _from = date_range_slider.value[0]\n",
" _to = date_range_slider.value[1]\n",
" _metric_name = metric_dropdown.value\n",
" \n",
" range_mask = (range_start_lvl >= _from) & (range_end_lvl <= _to)\n",
" asset_mask = symbol_lvl.isin(_symbols)\n",
" filt = return_by_type[range_mask & asset_mask]\n",
" \n",
" filt_binned = bin_return(filt)\n",
" if _metric_name == \"Mean\":\n",
" filt_metric = filt.groupby(\"stop_value\").mean()\n",
" filt_bin_metric = filt_binned.groupby(\"bin_right\").mean()\n",
" elif _metric_name == \"Median\":\n",
" filt_metric = filt.groupby(\"stop_value\").median()\n",
" filt_bin_metric = filt_binned.groupby(\"bin_right\").median()\n",
" elif _metric_name == \"Win Rate\":\n",
" filt_metric = (filt > 0).groupby(\"stop_value\").mean()\n",
" filt_bin_metric = (filt_binned > 0).groupby(\"bin_right\").mean()\n",
" elif _metric_name == \"Expectancy\":\n",
" filt_metric = get_expectancy(filt, \"stop_value\")\n",
" filt_bin_metric = get_expectancy(filt_binned, \"bin_right\")\n",
" \n",
" stop_scatter.fig.update_layout(yaxis_title=_metric_name)\n",
" stop_scatter.update(filt_metric)\n",
" stop_scatter_img.value = stop_scatter.fig.to_image(format=\"png\")\n",
" \n",
" bin_scatter.fig.update_layout(yaxis_title=_metric_name)\n",
" bin_scatter.update(filt_bin_metric)\n",
" bin_scatter_img.value = bin_scatter.fig.to_image(format=\"png\")\n",
" \n",
" range_start_label.value = np.datetime_as_string(_from.to_datetime64(), unit=\"D\")\n",
" range_end_label.value = np.datetime_as_string(_to.to_datetime64(), unit=\"D\")\n",
" \n",
"asset_multi_select.observe(update_scatter, names=\"value\")\n",
"date_range_slider.observe(update_scatter, names=\"value\")\n",
"metric_dropdown.observe(update_scatter, names=\"value\")\n",
"update_scatter()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dashboard = ipywidgets.VBox([\n",
" asset_multi_select,\n",
" ipywidgets.HBox([\n",
" range_start_label,\n",
" date_range_slider,\n",
" range_end_label\n",
" ]),\n",
" metric_dropdown,\n",
" stop_scatter_img,\n",
" bin_scatter_img\n",
"])\n",
"dashboard"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dashboard.close()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.12"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"state": {},
"version_major": 2,
"version_minor": 0
}
}
},
"nbformat": 4,
"nbformat_minor": 4
}

File diff suppressed because it is too large

View File

@ -0,0 +1,285 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In this example, we will build a Telegram bot that sends a signal once any Bollinger Band has been crossed. We will periodically query for the latest OHLCV data of the selected cryptocurrencies and append this data to our data pool. Additionally to receiving signals, any Telegram user can join the group and ask the bot to provide him with the current information. If the price change is higher than some number of standard deviations from the mean, while crossing the band, the bot sends a funny GIF."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from telegram import __version__ as TG_VER\n",
"\n",
"try:\n",
" from telegram import __version_info__\n",
"except ImportError:\n",
" __version_info__ = (0, 0, 0, 0, 0)\n",
"\n",
"if __version_info__ >= (20, 0, 0, \"alpha\", 1):\n",
" raise RuntimeError(f\"This example is not compatible with your current PTB version {TG_VER}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from vectorbtpro import *\n",
"# whats_imported()\n",
"\n",
"import logging"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n",
"logger = logging.getLogger(__name__)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Telegram\n",
"vbt.settings.messaging['telegram']['token'] = \"YOUR_TOKEN\"\n",
"\n",
"# Giphy\n",
"vbt.settings.messaging['giphy']['api_key'] = \"YOUR_API_KEY\"\n",
"\n",
"# Data\n",
"SYMBOLS = ['BTC/USDT', 'ETH/USDT', 'ADA/USDT']\n",
"START = '1 hour ago UTC'\n",
"TIMEFRAME = '1m'\n",
"UPDATE_EVERY = vbt.utils.datetime_.interval_to_ms(TIMEFRAME) // 1000 # in seconds\n",
"DT_FORMAT = '%d %b %Y %H:%M:%S %z'\n",
"IND_PARAMS = dict(\n",
" timeperiod=20, \n",
" nbdevup=2, \n",
" nbdevdn=2\n",
")\n",
"CHANGE_NBDEV = 2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data = vbt.CCXTData.pull(SYMBOLS, start=START, timeframe=TIMEFRAME)\n",
"\n",
"print(data.wrapper.index)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_bbands(data):\n",
" return vbt.IndicatorFactory.from_talib('BBANDS').run(\n",
" data.get('Close'), **IND_PARAMS, hide_params=list(IND_PARAMS.keys()))\n",
"\n",
"\n",
"def get_info(bbands):\n",
" info = dict()\n",
" info['last_price'] = bbands.close.iloc[-1]\n",
" info['last_change'] = (bbands.close.iloc[-1] - bbands.close.iloc[-2]) / bbands.close.iloc[-1]\n",
" info['last_crossed_above_upper'] = bbands.close_crossed_above(bbands.upperband).iloc[-1]\n",
" info['last_crossed_below_upper'] = bbands.close_crossed_below(bbands.upperband).iloc[-1]\n",
" info['last_crossed_below_lower'] = bbands.close_crossed_below(bbands.lowerband).iloc[-1]\n",
" info['last_crossed_above_lower'] = bbands.close_crossed_above(bbands.lowerband).iloc[-1]\n",
" info['bw'] = (bbands.upperband - bbands.lowerband) / bbands.middleband\n",
" info['last_bw_zscore'] = info['bw'].vbt.zscore().iloc[-1]\n",
" info['last_change_zscore'] = bbands.close.vbt.pct_change().vbt.zscore().iloc[-1]\n",
" info['last_change_pos'] = info['last_change_zscore'] >= CHANGE_NBDEV\n",
" info['last_change_neg'] = info['last_change_zscore'] <= -CHANGE_NBDEV\n",
" return info\n",
"\n",
"\n",
"def format_symbol_info(symbol, info):\n",
" last_change = info['last_change'][symbol]\n",
" last_price = info['last_price'][symbol]\n",
" last_bw_zscore = info['last_bw_zscore'][symbol]\n",
" return \"{} ({:.2%}, {}, {:.2f})\".format(symbol, last_change, last_price, last_bw_zscore)\n",
"\n",
"\n",
"def format_signals_info(emoji, signals, info):\n",
" symbols = signals.index[signals]\n",
" symbol_msgs = []\n",
" for symbol in symbols:\n",
" symbol_msgs.append(format_symbol_info(symbol, info))\n",
" return \"{} {}\".format(emoji, ', '.join(symbol_msgs))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from telegram.ext import CommandHandler\n",
"\n",
"class MyTelegramBot(vbt.TelegramBot):\n",
" def __init__(self, data, **kwargs):\n",
" super().__init__(data=data, **kwargs)\n",
" \n",
" self.data = data\n",
" self.update_ts = data.wrapper.index[-1]\n",
" \n",
" @property\n",
" def custom_handlers(self):\n",
" return (CommandHandler('info', self.info_callback),)\n",
" \n",
" def info_callback(self, update, context):\n",
" chat_id = update.effective_chat.id\n",
" if len(context.args) != 1:\n",
" await self.send_message(chat_id, \"Please provide one symbol.\")\n",
" return\n",
" symbol = context.args[0]\n",
" if symbol not in SYMBOLS:\n",
" await self.send_message(chat_id, f\"There is no such symbol as \\\"{symbol}\\\".\")\n",
" return\n",
" \n",
" bbands = get_bbands(self.data)\n",
" info = get_info(bbands)\n",
" messages = [format_symbol_info(symbol, info)]\n",
" message = '\\n'.join([\"{}:\".format(self.update_ts.strftime(DT_FORMAT))] + messages)\n",
" await self.send_message(chat_id, message)\n",
" \n",
" @property\n",
" def start_message(self):\n",
" index = self.data.wrapper.index\n",
" return f\"\"\"Hello! \n",
"\n",
"Starting with {len(index)} rows from {index[0].strftime(DT_FORMAT)} to {index[-1].strftime(DT_FORMAT)}.\"\"\"\n",
" \n",
" @property\n",
" def help_message(self):\n",
" return \"\"\"Message format:\n",
"[event] [symbol] ([price change], [new price], [bandwidth z-score])\n",
" \n",
"Event legend:\n",
"⬆️ - Price went above upper band\n",
"⤵️ - Price retraced below upper band\n",
"⬇️ - Price went below lower band\n",
"⤴️ - Price retraced above lower band\n",
"\n",
"GIF is sent once a band is crossed and the price change is 2 stds from the mean.\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"telegram_bot = MyTelegramBot(data)\n",
"telegram_bot.start(in_background=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class MyDataUpdater(vbt.DataUpdater):\n",
" _expected_keys=None\n",
" \n",
" def __init__(self, data, telegram_bot, **kwargs):\n",
" super().__init__(data, telegram_bot=telegram_bot, **kwargs)\n",
" \n",
" self.telegram_bot = telegram_bot\n",
" self.update_ts = data.wrapper.index[-1]\n",
" \n",
" def update(self):\n",
" super().update()\n",
" self.update_ts = vbt.timestamp(tz=self.update_ts.tz)\n",
" self.telegram_bot.data = self.data\n",
" self.telegram_bot.update_ts = self.update_ts\n",
" \n",
" bbands = get_bbands(self.data)\n",
" info = get_info(bbands)\n",
" \n",
" messages = []\n",
" if info['last_crossed_above_upper'].any():\n",
" messages.append(format_signals_info('⬆️', info['last_crossed_above_upper'], info))\n",
" if info['last_crossed_below_upper'].any():\n",
" messages.append(format_signals_info('⤵️', info['last_crossed_below_upper'], info))\n",
" if info['last_crossed_below_lower'].any():\n",
" messages.append(format_signals_info('⬇️', info['last_crossed_below_lower'], info))\n",
" if info['last_crossed_above_lower'].any():\n",
" messages.append(format_signals_info('⤴️', info['last_crossed_above_lower'], info))\n",
" \n",
" if len(messages) > 0:\n",
" message = '\\n'.join([\"{}:\".format(self.update_ts.strftime(DT_FORMAT))] + messages)\n",
" self.telegram_bot.send_message_to_all(message)\n",
" if (info['last_crossed_above_upper'] & info['last_change_pos']).any():\n",
" self.telegram_bot.send_giphy_to_all(\"launch\")\n",
" if (info['last_crossed_below_lower'] & info['last_change_neg']).any():\n",
" self.telegram_bot.send_giphy_to_all(\"fall\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data_updater = MyDataUpdater(data, telegram_bot)\n",
"data_updater.update_every(UPDATE_EVERY)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"telegram_bot.stop()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.12"
}
},
"nbformat": 4,
"nbformat_minor": 4
}