In [ ]:
from vectorbtpro import *

# whats_imported()
vbt.settings.set_theme("dark")
In [ ]:
# data = vbt.BinanceData.pull(
#     ["BTCUSDT", "ETHUSDT", "BNBUSDT", "XRPUSDT", "ADAUSDT"],
#     start="2020-01-01 UTC",
#     end="2021-01-01 UTC",
#     timeframe="1h"
# )
# data.to_hdf()
In [ ]:
data = vbt.HDFData.pull("BinanceData.h5")
Allocation
Manually
Index points
In [ ]:
ms_points = data.wrapper.get_index_points(every="M")
ms_points
In [ ]:
data.wrapper.index.get_indexer(
    pd.Series(index=data.wrapper.index).resample(vbt.offset("M")).asfreq().index,
    method="bfill"
)
In [ ]:
data.wrapper.index[ms_points]
In [ ]:
example_points = data.wrapper.get_index_points(every=24 * 30)
data.wrapper.index[example_points]
In [ ]:
date_offset = pd.offsets.WeekOfMonth(week=3, weekday=4)
example_points = data.wrapper.get_index_points(
    every=date_offset,
    add_delta=pd.Timedelta(hours=17)
)
data.wrapper.index[example_points]
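Here, `pd.offsets.WeekOfMonth(week=3, weekday=4)` targets the fourth Friday of each month (both arguments are zero-based), and `add_delta` then shifts each point forward by 17 hours. A quick sanity check of the offset alone, on an illustrative date that isn't part of the pipeline:
In [ ]:
pd.Timestamp("2020-01-01") + pd.offsets.WeekOfMonth(week=3, weekday=4)  # -> 2020-01-24, the fourth Friday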
In [ ]:
example_points = data.wrapper.get_index_points(
    start="April 1st 2020",
    every="M"
)
data.wrapper.index[example_points]
In [ ]:
example_points = data.wrapper.get_index_points(
    on=["April 1st 2020 19:45", "17 September 2020 00:01"]
)
data.wrapper.index[example_points]
Filling
In [ ]:
symbol_wrapper = data.get_symbol_wrapper(freq="1h")  # one column per symbol
filled_allocations = symbol_wrapper.fill()  # empty (NaN-filled) DataFrame to be populated
print(filled_allocations)
In [ ]:
np.random.seed(42)

def random_allocate_func():
    weights = np.random.uniform(size=symbol_wrapper.shape[1])
    return weights / weights.sum()  # normalize so the weights sum to 1

for idx in ms_points:
    filled_allocations.iloc[idx] = random_allocate_func()

allocations = filled_allocations[~filled_allocations.isnull().any(axis=1)]  # keep only allocated rows
allocations
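Since each weight vector is normalized by its sum, every allocation row should sum to one; a quick sanity check, not part of the original flow:
In [ ]:
np.isclose(allocations.sum(axis=1), 1).all()  # True if all rows are valid allocations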
Simulation
In [ ]:
pf = vbt.Portfolio.from_orders(
    close=data.get("Close"),
    size=filled_allocations,
    size_type="targetpercent",
    group_by=True,
    cash_sharing=True,
    call_seq="auto"
)
In [ ]:
sim_alloc = pf.get_asset_value(group_by=False).vbt / pf.value
print(sim_alloc)
In [ ]:
sim_alloc.vbt.plot(
    trace_kwargs=dict(stackgroup="one"),
    use_gl=False
).show_svg()
In [ ]:
pf.plot_allocations().show_svg()
In [ ]:
np.isclose(allocations, sim_alloc.iloc[ms_points])
Allocation method
In [ ]:
np.random.seed(42)

pfo = vbt.PortfolioOptimizer.from_allocate_func(
    symbol_wrapper,
    random_allocate_func,
    every="M"
)
In [ ]:
print(pfo.allocations)
In [ ]:
print(pfo.filled_allocations)
In [ ]:
print(pfo.alloc_records.records_readable)
In [ ]:
pfo.plot().show_svg()
In [ ]:
pfo.stats()
In [ ]:
pf = vbt.Portfolio.from_optimizer(data, pfo, freq="1h")
pf.sharpe_ratio
In [ ]:
pf = pfo.simulate(data, freq="1h")
pf.sharpe_ratio
Once
In [ ]:
def const_allocate_func(target_alloc):
    return target_alloc

pfo = vbt.PortfolioOptimizer.from_allocate_func(
    symbol_wrapper,
    const_allocate_func,
    [0.5, 0.2, 0.1, 0.1, 0.1]
)
pfo.plot().show_svg()
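Because no `every` argument is passed, the allocation is applied only once, at the first bar of the index. A quick check of the row count (the single-row result is the expected outcome, assuming a single group):
In [ ]:
len(pfo.allocations)  # should be 1: a single allocation at the first timestamp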
Parsing
In [ ]:
custom_index = vbt.date_range("2020-01-01", "2021-01-01", freq="Q")
custom_allocations = pd.DataFrame(
    [
        [0.5, 0.2, 0.1, 0.1, 0.1],
        [0.1, 0.5, 0.2, 0.1, 0.1],
        [0.1, 0.1, 0.5, 0.2, 0.1],
        [0.1, 0.1, 0.1, 0.5, 0.2]
    ],
    index=custom_index,
    columns=symbol_wrapper.columns
)
In [ ]:
pfo = vbt.PortfolioOptimizer.from_allocations(
    symbol_wrapper,
    custom_allocations
)
print(pfo.allocations)
In [ ]:
pfo = vbt.PortfolioOptimizer.from_allocations(
    symbol_wrapper,
    custom_allocations.values,
    start="2020-01-01",
    end="2021-01-01",
    every="Q"
)
print(pfo.allocations)
In [ ]:
pfo = vbt.PortfolioOptimizer.from_filled_allocations(
    pfo.fill_allocations()
)
print(pfo.allocations)
Templates
In [ ]:
def rotation_allocate_func(wrapper, i):
    weights = np.full(len(wrapper.columns), 0)
    weights[i % len(wrapper.columns)] = 1
    return weights

pfo = vbt.PortfolioOptimizer.from_allocate_func(
    symbol_wrapper,
    rotation_allocate_func,
    vbt.Rep("wrapper"),
    vbt.Rep("i"),
    every="M"
)
pfo.plot().show_svg()
In [ ]:
def rotation_allocate_func(symbols, chosen_symbol):
    return {s: 1 if s == chosen_symbol else 0 for s in symbols}

pfo = vbt.PortfolioOptimizer.from_allocate_func(
    symbol_wrapper,
    rotation_allocate_func,
    vbt.RepEval("wrapper.columns"),
    vbt.RepEval("wrapper.columns[i % len(wrapper.columns)]"),
    every="M"
)
In [ ]:
print(pfo.allocations)
Groups
In [ ]:
pfo = vbt.PortfolioOptimizer.from_allocate_func(
    symbol_wrapper,
    const_allocate_func,
    [0.5, 0.2, 0.1, 0.1, 0.1],
    every=vbt.Param(["1M", "2M", "3M"])
)
pf = pfo.simulate(data, freq="1h")
pf.total_return
In [ ]:
pfo = vbt.PortfolioOptimizer.from_allocate_func(
    symbol_wrapper,
    const_allocate_func,
    vbt.Param([
        [0.5, 0.2, 0.1, 0.1, 0.1],
        [0.2, 0.1, 0.1, 0.1, 0.5]
    ], keys=pd.Index(["w1", "w2"], name="weights")),
    every=vbt.Param(["1M", "2M", "3M"])
)
In [ ]:
pfo.wrapper.grouper.get_index()
In [ ]:
pfo.wrapper.columns
In [ ]:
pfo[("3M", "w2")].stats()
In [ ]:
pfo = vbt.PortfolioOptimizer.from_allocate_func(
    symbol_wrapper,
    const_allocate_func,
    group_configs=[
        dict(args=([0.5, 0.2, 0.1, 0.1, 0.1],), every="1M"),
        dict(args=([0.2, 0.1, 0.1, 0.1, 0.5],), every="2M"),
        dict(args=([0.1, 0.1, 0.1, 0.5, 0.2],), every="3M"),
        dict(args=([0.1, 0.1, 0.5, 0.2, 0.1],), every="1M"),
        dict(args=([0.1, 0.5, 0.2, 0.1, 0.1],), every="2M"),
        dict(args=([0.5, 0.2, 0.1, 0.1, 0.1],), every="3M"),
    ]
)
pfo.wrapper.grouper.get_index()
In [ ]:
pfo = vbt.PortfolioOptimizer.from_allocate_func(
    symbol_wrapper,
    const_allocate_func,
    group_configs=[
        dict(
            allocate_func=const_allocate_func,
            args=([0.5, 0.2, 0.1, 0.1, 0.1],),
            _name="const"
        ),
        dict(
            allocate_func=random_allocate_func,
            every="M",
            _name="random"
        ),
    ]
)
pfo.wrapper.grouper.get_index()
In [ ]:
pfo = vbt.PortfolioOptimizer.from_allocate_func(
    symbol_wrapper,
    const_allocate_func,
    group_configs={
        "const": dict(
            allocate_func=const_allocate_func,
            args=([0.5, 0.2, 0.1, 0.1, 0.1],)
        ),
        "random": dict(
            allocate_func=random_allocate_func,
        ),
    },
    every=vbt.Param(["1M", "2M", "3M"])
)
pfo.wrapper.grouper.get_index()
Numba
In [ ]:
@njit
def rotation_allocate_func_nb(i, idx, n_cols):
    weights = np.full(n_cols, 0)
    weights[i % n_cols] = 1
    return weights

pfo = vbt.PortfolioOptimizer.from_allocate_func(
    symbol_wrapper,
    rotation_allocate_func_nb,
    vbt.RepEval("len(wrapper.columns)"),
    every="W",
    jitted_loop=True
)
print(pfo.allocations.head())
Distribution
In [ ]:
pfo = vbt.PortfolioOptimizer.from_allocate_func(
    symbol_wrapper,
    rotation_allocate_func_nb,
    vbt.Rep("i"),
    vbt.Rep("index_point"),
    vbt.RepEval("len(wrapper.columns)"),
    every="D",
    execute_kwargs=dict(engine="dask")
)
print(pfo.allocations.head())
In [ ]:
pfo = vbt.PortfolioOptimizer.from_allocate_func(
    symbol_wrapper,
    rotation_allocate_func_nb,
    vbt.RepEval("len(wrapper.columns)"),
    every="D",
    jitted_loop=True,
    chunked=dict(
        arg_take_spec=dict(args=vbt.ArgsTaker(None)),
        engine="dask"
    )
)
print(pfo.allocations.head())
In [ ]:
pfo = vbt.PortfolioOptimizer.from_allocate_func(
    symbol_wrapper,
    rotation_allocate_func_nb,
    vbt.RepEval("len(wrapper.columns)"),
    every="D",
    jitted_loop=True,
    jitted=dict(parallel=True)
)
print(pfo.allocations.head())
Optimization
Index ranges
In [ ]:
example_ranges = data.wrapper.get_index_ranges(every="M")
example_ranges[0]
In [ ]:
example_ranges[1]
In [ ]:
data.wrapper.index[example_ranges[0][0]:example_ranges[1][0]]
In [ ]:
example_ranges = data.wrapper.get_index_ranges(
    every="M",
    lookback_period="3M"
)

def get_index_bounds(range_starts, range_ends):
    for i in range(len(range_starts)):
        range_index = data.wrapper.index[range_starts[i]:range_ends[i]]
        yield range_index[0], range_index[-1]

list(get_index_bounds(*example_ranges))
In [ ]:
example_ranges = data.wrapper.get_index_ranges(
    start=["2020-01-01", "2020-04-01", "2020-08-01"],
    end=["2020-04-01", "2020-08-01", "2020-12-01"]
)
list(get_index_bounds(*example_ranges))
In [ ]:
example_ranges = data.wrapper.get_index_ranges(
    start="2020-01-01",
    end=["2020-04-01", "2020-08-01", "2020-12-01"]
)
list(get_index_bounds(*example_ranges))
In [ ]:
example_ranges = data.wrapper.get_index_ranges(
    every="Q",
    exact_start=True,
    fixed_start=True
)
list(get_index_bounds(*example_ranges))
Optimization method
In [ ]:
def inv_rank_optimize_func(price, index_slice):
    price_period = price.iloc[index_slice]
    first_price = price_period.iloc[0]
    last_price = price_period.iloc[-1]
    ret = (last_price - first_price) / first_price
    ranks = ret.rank(ascending=False)
    return ranks / ranks.sum()

pfo = vbt.PortfolioOptimizer.from_optimize_func(
    symbol_wrapper,
    inv_rank_optimize_func,
    data.get("Close"),
    vbt.Rep("index_slice"),
    every="M"
)
print(pfo.allocations)
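Ranking runs in descending order, so the best performer receives rank 1 and thus the smallest weight: the function deliberately overweights recent losers, a mean-reversion bet. A toy illustration with hypothetical returns, independent of the pipeline above:
In [ ]:
ret = pd.Series({"A": 0.10, "B": 0.05, "C": -0.02})
ranks = ret.rank(ascending=False)  # A -> 1.0, B -> 2.0, C -> 3.0
ranks / ranks.sum()  # C, the worst performer, gets the largest weight (0.5)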
In [ ]:
print(pfo.alloc_records.records_readable)
In [ ]:
start_idx = pfo.alloc_records.values[0]["start_idx"]
end_idx = pfo.alloc_records.values[0]["end_idx"]
close_period = data.get("Close").iloc[start_idx:end_idx]
close_period.vbt.rebase(1).vbt.plot().show_svg()
In [ ]:
pfo.stats()
In [ ]:
pfo.plots().show_svg()
Numba
In [ ]:
@njit
def inv_rank_optimize_func_nb(i, start_idx, end_idx, price):
    price_period = price[start_idx:end_idx]
    first_price = price_period[0]
    last_price = price_period[-1]
    ret = (last_price - first_price) / first_price
    ranks = vbt.nb.rank_1d_nb(-ret)
    return ranks / ranks.sum()

pfo = vbt.PortfolioOptimizer.from_optimize_func(
    symbol_wrapper,
    inv_rank_optimize_func_nb,
    data.get("Close").values,
    every="M",
    jitted_loop=True
)
print(pfo.allocations)
Integrations
PyPortfolioOpt
In [ ]:
from pypfopt.expected_returns import mean_historical_return
from pypfopt.risk_models import CovarianceShrinkage
from pypfopt.efficient_frontier import EfficientFrontier

expected_returns = mean_historical_return(data.get("Close"))
cov_matrix = CovarianceShrinkage(data.get("Close")).ledoit_wolf()
optimizer = EfficientFrontier(expected_returns, cov_matrix)
weights = optimizer.max_sharpe()
weights
Parsing
In [ ]:
from vectorbtpro.portfolio.pfopt.base import resolve_pypfopt_func_kwargs

print(vbt.format_func(mean_historical_return))
print(vbt.prettify(resolve_pypfopt_func_kwargs(
    mean_historical_return,
    prices=data.get("Close"),
    freq="1h",
    year_freq="365d",
    other_arg=100
)))
In [ ]:
print(vbt.prettify(resolve_pypfopt_func_kwargs(
    EfficientFrontier,
    prices=data.get("Close")
)))
In [ ]:
print(vbt.prettify(resolve_pypfopt_func_kwargs(
    EfficientFrontier,
    prices=data.get("Close"),
    expected_returns="ema_historical_return",
    cov_matrix="sample_cov"
)))
Auto-optimization
In [ ]:
vbt.pypfopt_optimize(prices=data.get("Close"))
In [ ]:
S = CovarianceShrinkage(data.get("Close")).ledoit_wolf()
ef = EfficientFrontier(None, S, weight_bounds=(-1, 1))
ef.min_volatility()
weights = ef.clean_weights()
weights
In [ ]:
vbt.pypfopt_optimize(
    prices=data.get("Close"),
    expected_returns=None,
    weight_bounds=(-1, 1),
    target="min_volatility"
)
In [ ]:
from pypfopt.expected_returns import capm_return

sector_mapper = {
    "ADAUSDT": "DeFi",
    "BNBUSDT": "DeFi",
    "BTCUSDT": "Payment",
    "ETHUSDT": "DeFi",
    "XRPUSDT": "Payment"
}
sector_lower = {
    "DeFi": 0.75
}
sector_upper = {}

mu = capm_return(data.get("Close"))
S = CovarianceShrinkage(data.get("Close")).ledoit_wolf()
ef = EfficientFrontier(mu, S)
ef.add_sector_constraints(sector_mapper, sector_lower, sector_upper)
adausdt_index = ef.tickers.index("ADAUSDT")
ef.add_constraint(lambda w: w[adausdt_index] == 0.10)
ef.max_sharpe()
weights = ef.clean_weights()
weights
In [ ]:
adausdt_index = list(sector_mapper.keys()).index("ADAUSDT")

vbt.pypfopt_optimize(
    prices=data.get("Close"),
    sector_mapper=sector_mapper,
    sector_lower=sector_lower,
    sector_upper=sector_upper,
    constraints=[lambda w: w[adausdt_index] == 0.10],
    expected_returns="capm_return"
)
In [ ]:
from pypfopt.objective_functions import L2_reg

mu = capm_return(data.get("Close"))
S = CovarianceShrinkage(data.get("Close")).ledoit_wolf()
ef = EfficientFrontier(mu, S)
ef.add_sector_constraints(sector_mapper, sector_lower, sector_upper)
ef.add_objective(L2_reg, gamma=0.1)  # gamma is the tuning parameter
ef.efficient_risk(0.15)
weights = ef.clean_weights()
weights
In [ ]:
vbt.pypfopt_optimize(
    prices=data.get("Close"),
    expected_returns="capm_return",
    sector_mapper=sector_mapper,
    sector_lower=sector_lower,
    sector_upper=sector_upper,
    objectives=["L2_reg"],
    gamma=0.1,
    target="efficient_risk",
    target_volatility=0.15
)
In [ ]:
from pypfopt import EfficientSemivariance
from pypfopt.expected_returns import returns_from_prices

mu = capm_return(data.get("Close"))
returns = returns_from_prices(data.get("Close"))
returns = returns.dropna()
es = EfficientSemivariance(mu, returns)
es.efficient_return(0.01)
weights = es.clean_weights()
weights
In [ ]:
vbt.pypfopt_optimize(
    prices=data.get("Close"),
    expected_returns="capm_return",
    optimizer="efficient_semivariance",
    target="efficient_return",
    target_return=0.01
)
In [ ]:
initial_weights = np.array([1 / len(data.symbols)] * len(data.symbols))
In [ ]:
from pypfopt.objective_functions import transaction_cost

mu = mean_historical_return(data.get("Close"))
S = CovarianceShrinkage(data.get("Close")).ledoit_wolf()
ef = EfficientFrontier(mu, S)
ef.add_objective(transaction_cost, w_prev=initial_weights, k=0.001)
ef.add_objective(L2_reg, gamma=0.05)
ef.min_volatility()
weights = ef.clean_weights()
weights
In [ ]:
vbt.pypfopt_optimize(
    prices=data.get("Close"),
    objectives=["transaction_cost", "L2_reg"],
    w_prev=initial_weights,
    k=0.001,
    gamma=0.05,
    target="min_volatility"
)
In [ ]:
import cvxpy as cp

def logarithmic_barrier_objective(w, cov_matrix, k=0.1):
    log_sum = cp.sum(cp.log(w))
    var = cp.quad_form(w, cov_matrix)
    return var - k * log_sum
In [ ]:
mu = mean_historical_return(data.get("Close"))
S = CovarianceShrinkage(data.get("Close")).ledoit_wolf()
ef = EfficientFrontier(mu, S, weight_bounds=(0.01, 0.3))
ef.convex_objective(logarithmic_barrier_objective, cov_matrix=S, k=0.001)
weights = ef.clean_weights()
weights
In [ ]:
vbt.pypfopt_optimize(
    prices=data.get("Close"),
    weight_bounds=(0.01, 0.3),
    k=0.001,
    target=logarithmic_barrier_objective
)
In [ ]:
def deviation_risk_parity(w, cov_matrix):
    cov_matrix = np.asarray(cov_matrix)
    n = cov_matrix.shape[0]
    rp = (w * (cov_matrix @ w)) / cp.quad_form(w, cov_matrix)
    return cp.sum_squares(rp - 1 / n).value
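The intuition behind this objective: with covariance matrix $\Sigma$, asset $i$'s normalized risk contribution is $\mathrm{rp}_i = w_i (\Sigma w)_i / (w^\top \Sigma w)$, which sums to one across assets; the penalty $\sum_i (\mathrm{rp}_i - 1/n)^2$ is therefore zero exactly when every asset contributes an equal share $1/n$ of total portfolio risk, which is the risk-parity condition.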
In [ ]:
mu = mean_historical_return(data.get("Close"))
S = CovarianceShrinkage(data.get("Close")).ledoit_wolf()
ef = EfficientFrontier(mu, S)
ef.nonconvex_objective(deviation_risk_parity, ef.cov_matrix)
weights = ef.clean_weights()
weights
In [ ]:
vbt.pypfopt_optimize(
    prices=data.get("Close"),
    target=deviation_risk_parity,
    target_is_convex=False
)
In [ ]:
sp500_data = vbt.YFData.pull(
    "^GSPC",
    start=data.wrapper.index[0],
    end=data.wrapper.index[-1]
)
market_caps = data.get("Close") * data.get("Volume")
viewdict = {
    "ADAUSDT": 0.20,
    "BNBUSDT": -0.30,
    "BTCUSDT": 0,
    "ETHUSDT": -0.2,
    "XRPUSDT": 0.15
}
In [ ]:
from pypfopt.black_litterman import (
    market_implied_risk_aversion,
    market_implied_prior_returns,
    BlackLittermanModel
)

S = CovarianceShrinkage(data.get("Close")).ledoit_wolf()
delta = market_implied_risk_aversion(sp500_data.get("Close"))
prior = market_implied_prior_returns(market_caps.iloc[-1], delta, S)
bl = BlackLittermanModel(S, pi=prior, absolute_views=viewdict)
rets = bl.bl_returns()
ef = EfficientFrontier(rets, S)
ef.min_volatility()
weights = ef.clean_weights()
weights
In [ ]:
vbt.pypfopt_optimize(
    prices=data.get("Close"),
    expected_returns="bl_returns",
    market_prices=sp500_data.get("Close"),
    market_caps=market_caps.iloc[-1],
    absolute_views=viewdict,
    target="min_volatility"
)
In [ ]:
from pypfopt import HRPOpt

rets = returns_from_prices(data.get("Close"))
hrp = HRPOpt(rets)
hrp.optimize()
weights = hrp.clean_weights()
weights
In [ ]:
vbt.pypfopt_optimize(
    prices=data.get("Close"),
    optimizer="hrp",
    target="optimize"
)
Argument groups
In [ ]:
vbt.pypfopt_optimize(
    prices=data.get("Close"),
    expected_returns="bl_returns",
    market_prices=sp500_data.get("Close"),
    market_caps=market_caps.iloc[-1],
    absolute_views=viewdict,
    target="min_volatility",
    cov_matrix=vbt.pfopt_func_dict({
        "EfficientFrontier": "sample_cov",
        "_def": "ledoit_wolf"
    })
)
Periodically
In [ ]:
pfo = vbt.PortfolioOptimizer.from_pypfopt(
    prices=data.get("Close"),
    every="W"
)
pfo.plot().show_svg()
In [ ]:
pfo = vbt.PortfolioOptimizer.from_pypfopt(
    prices=data.get("Close"),
    every="W",
    target=vbt.Param([
        "max_sharpe",
        "min_volatility",
        "max_quadratic_utility"
    ])
)
pfo.plot(column="min_volatility").show_svg()
In [ ]:
pf = pfo.simulate(data, freq="1h")
pf.sharpe_ratio
Riskfolio-Lib
Parsing
In [ ]:
import riskfolio as rp

returns = data.get("Close").vbt.to_returns()
port = rp.Portfolio(returns=returns)
port.assets_stats(
    method_mu="hist",
    method_cov="hist",
    d=0.94
)
w = port.optimization(
    model="Classic",
    rm="MV",
    obj="Sharpe",
    rf=0,
    l=0,
    hist=True
)
print(w.T)
In [ ]:
from vectorbtpro.utils.parsing import get_func_arg_names

get_func_arg_names(port.assets_stats)
In [ ]:
from vectorbtpro.portfolio.pfopt.base import resolve_riskfolio_func_kwargs

resolve_riskfolio_func_kwargs(
    port.assets_stats,
    method_mu="hist",
    method_cov="hist",
    model="Classic"
)
In [ ]:
resolve_riskfolio_func_kwargs(
    port.assets_stats,
    method_mu="hist",
    method_cov="hist",
    model="Classic",
    func_kwargs=dict(
        assets_stats=dict(method_mu="ewma1"),
        optimization=dict(model="BL")
    )
)
Auto-optimization
In [ ]:
vbt.riskfolio_optimize(returns)
In [ ]:
port = rp.Portfolio(returns=returns)
port.assets_stats(
    method_mu="hist",
    method_cov="hist",
    d=0.94
)
w = port.optimization(
    model="Classic",
    rm="UCI",
    obj="Sharpe",
    rf=0,
    l=0,
    hist=True
)
print(w.T)
In [ ]:
vbt.riskfolio_optimize(
    returns,
    method_mu="hist",
    method_cov="hist",
    d=0.94,
    rm="UCI",
    obj="Sharpe",
    rf=0,
    l=0,
    hist=True
)
In [ ]:
port = rp.Portfolio(returns=returns)
port.assets_stats(
    method_mu="hist",
    method_cov="hist",
    d=0.94
)
port.wc_stats(
    box="s",
    ellip="s",
    q=0.05,
    n_sim=3000,
    window=3,
    dmu=0.1,
    dcov=0.1,
    seed=0
)
w = port.wc_optimization(
    obj="Sharpe",
    rf=0,
    l=0,
    Umu="box",
    Ucov="box"
)
print(w.T)
In [ ]:
vbt.riskfolio_optimize(
    returns,
    opt_method="wc",
    method_mu="hist",
    method_cov="hist",
    box="s",
    ellip="s",
    q=0.05,
    n_sim=3000,
    window=3,
    dmu=0.1,
    dcov=0.1,
    seed=0,
    obj="Sharpe",
    rf=0,
    l=0,
    Umu="box",
    Ucov="box"
)
In [ ]:
vbt.riskfolio_optimize(
    returns,
    func_kwargs=dict(
        assets_stats=dict(
            opt_method="wc",
            method_mu="hist",
            method_cov="hist"
        ),
        wc_stats=dict(
            box="s",
            ellip="s",
            q=0.05,
            n_sim=3000,
            window=3,
            dmu=0.1,
            dcov=0.1,
            seed=0
        ),
        wc_optimization=dict(
            obj="Sharpe",
            rf=0,
            l=0,
            Umu="box",
            Ucov="box"
        )
    )
)
In [ ]:
port = rp.Portfolio(returns=returns)
port.sht = True  # allow short weights
port.uppersht = 0.3  # max absolute sum of short weights
port.upperlng = 1.3  # max sum of long weights
port.budget = 1.0  # no leverage
port.assets_stats(
    method_mu="hist",
    method_cov="hist",
    d=0.94
)
w = port.optimization(
    model="Classic",
    rm="MV",
    obj="Sharpe",
    rf=0,
    l=0,
    hist=True
)
print(w.T)
In [ ]:
vbt.riskfolio_optimize(
    returns,
    sht=True,
    uppersht=0.3,
    upperlng=1.3,
    budget=1.0,
    method_mu="hist",
    method_cov="hist",
    d=0.94,
    rm="MV",
    obj="Sharpe",
    rf=0,
    l=0,
    hist=True
)
In [ ]:
port = rp.Portfolio(returns=returns)
port.assets_stats(
    method_mu="hist",
    method_cov="hist",
    d=0.94
)
asset_classes = {"Assets": returns.columns.tolist()}
asset_classes = pd.DataFrame(asset_classes)
constraints = {
    "Disabled": [False, False],
    "Type": ["All Assets", "Assets"],
    "Set": ["", ""],
    "Position": ["", "BTCUSDT"],
    "Sign": [">=", "<="],
    "Weight": [0.1, 0.15],
    "Type Relative": ["", ""],
    "Relative Set": ["", ""],
    "Relative": ["", ""],
    "Factor": ["", ""],
}
constraints = pd.DataFrame(constraints)
A, B = rp.assets_constraints(constraints, asset_classes)
port.ainequality = A
port.binequality = B
w = port.optimization(
    model="Classic",
    rm="MV",
    obj="Sharpe",
    rf=0,
    l=0,
    hist=True
)
print(w.T)
In [ ]:
vbt.riskfolio_optimize(
    returns,
    method_mu="hist",
    method_cov="hist",
    constraints=[{
        "Type": "All Assets",
        "Sign": ">=",
        "Weight": 0.1
    }, {
        "Type": "Assets",
        "Position": "BTCUSDT",
        "Sign": "<=",
        "Weight": 0.15
    }],
    d=0.94,
    rm="MV",
    obj="Sharpe",
    rf=0,
    l=0,
    hist=True
)
In [ ]:
tags = [
    "Smart contracts",
    "Smart contracts",
    "Payments",
    "Smart contracts",
    "Payments"
]
In [ ]:
port = rp.Portfolio(returns=returns)
port.assets_stats(
    method_mu="hist",
    method_cov="hist",
    d=0.94
)
asset_classes = {
    "Assets": returns.columns.tolist(),
    "Tags": tags
}
asset_classes = pd.DataFrame(asset_classes)
constraints = {
    "Disabled": [False],
    "Type": ["Classes"],
    "Set": ["Tags"],
    "Position": ["Smart contracts"],
    "Sign": [">="],
    "Weight": [0.8],
    "Type Relative": [""],
    "Relative Set": [""],
    "Relative": [""],
    "Factor": [""],
}
constraints = pd.DataFrame(constraints)
A, B = rp.assets_constraints(constraints, asset_classes)
port.ainequality = A
port.binequality = B
w = port.optimization(
    model="Classic",
    rm="MV",
    obj="Sharpe",
    rf=0,
    l=0,
    hist=True
)
print(w.T)
In [ ]:
vbt.riskfolio_optimize(
    returns,
    method_mu="hist",
    method_cov="hist",
    asset_classes={"Tags": tags},
    constraints=[{
        "Type": "Classes",
        "Set": "Tags",
        "Position": "Smart contracts",
        "Sign": ">=",
        "Weight": 0.8
    }],
    d=0.94,
    rm="MV",
    obj="Sharpe",
    rf=0,
    l=0,
    hist=True
)
In [ ]:
port = rp.HCPortfolio(returns=returns)
w = port.optimization(
    model="NCO",
    codependence="pearson",
    covariance="hist",
    obj="MinRisk",
    rm="MV",
    rf=0,
    l=2,
    linkage="ward",
    max_k=10,
    leaf_order=True
)
print(w.T)
In [ ]:
vbt.riskfolio_optimize(
    returns,
    port_cls="HCPortfolio",
    model="NCO",
    codependence="pearson",
    covariance="hist",
    obj="MinRisk",
    rm="MV",
    rf=0,
    l=2,
    linkage="ward",
    max_k=10,
    leaf_order=True
)
Periodically
In [ ]:
pfo = vbt.PortfolioOptimizer.from_riskfolio(
    returns=returns,
    every="W"
)
pfo.plot().show_svg()
In [ ]:
pfo = vbt.PortfolioOptimizer.from_riskfolio(
    returns=returns,
    constraints=[{
        "Type": "Assets",
        "Position": "BTCUSDT",
        "Sign": "<=",
        "Weight": vbt.Param([0.1, 0.2, 0.3], name="BTCUSDT_maxw")
    }],
    every="W",
    param_search_kwargs=dict(incl_types=list)
)
In [ ]:
print(pfo.allocations.groupby("BTCUSDT_maxw").max())
Universal portfolios
In [ ]:
from universal import tools, algos

with vbt.WarningsFiltered():
    algo = algos.CRP()
    algo_result = algo.run(data.get("Close"))
    print(algo_result.weights)
In [ ]:
with vbt.WarningsFiltered():
    algo = algos.DynamicCRP(
        n=30,
        min_history=7,
        metric='sharpe',
        alpha=0.01
    )
    algo_result = algo.run(data.get("Close").resample("D").last())
    down_weights = algo_result.weights
    print(down_weights)
In [ ]:
weights = down_weights.vbt.realign(
    data.wrapper.index,
    freq="1h",
    source_rbound=True,  # daily weights become valid at the end of each day
    target_rbound=True,
    ffill=False
)
print(weights)
In [ ]:
with vbt.WarningsFiltered():
    down_pfo = vbt.PortfolioOptimizer.from_universal_algo(
        "DynamicCRP",
        data.get("Close").resample("D").last(),
        n=vbt.Param([7, 14, 30, 90]),
        min_history=7,
        metric='sharpe',
        alpha=0.01
    )

down_pfo.plot(column=90).show_svg()
In [ ]:
resampler = vbt.Resampler(
    down_pfo.wrapper.index,
    data.wrapper.index,
    target_freq="1h"
)
pfo = down_pfo.resample(resampler)
In [ ]:
pf = pfo.simulate(data, freq="1h")
pf.sharpe_ratio
Custom algorithm
In [ ]:
from universal.algo import Algo

class MeanReversion(Algo):
    PRICE_TYPE = 'log'

    def __init__(self, n):
        self.n = n
        super().__init__(min_history=n)

    def init_weights(self, cols):
        return pd.Series(np.zeros(len(cols)), cols)

    def step(self, x, last_b, history):
        ma = history.iloc[-self.n:].mean()
        delta = x - ma
        w = np.maximum(-delta, 0.)
        return w / sum(w)
In [ ]:
with vbt.WarningsFiltered():
    pfo = vbt.PortfolioOptimizer.from_universal_algo(
        MeanReversion,
        data.get("Close").resample("D").last(),
        n=30,
        every="W"
    )

pfo.plot().show_svg()
Dynamic
In [ ]:
GroupMemory = namedtuple("GroupMemory", [
    "target_alloc",
    "size_type",
    "direction",
    "order_value_out"
])

@njit
def pre_group_func_nb(c):
    group_memory = GroupMemory(
        target_alloc=np.full(c.group_len, np.nan),
        size_type=np.full(c.group_len, vbt.pf_enums.SizeType.TargetPercent),
        direction=np.full(c.group_len, vbt.pf_enums.Direction.Both),
        order_value_out=np.full(c.group_len, np.nan)
    )
    return group_memory,

@njit
def pre_segment_func_nb(
    c,
    group_memory,
    min_history,
    threshold,
    allocate_func_nb,
    *args
):
    should_rebalance = False
    if c.i >= min_history:
        in_position = False
        for col in range(c.from_col, c.to_col):
            if c.last_position[col] != 0:
                in_position = True
                break
        if not in_position:
            should_rebalance = True
        else:
            curr_value = c.last_value[c.group]
            for group_col in range(c.group_len):
                col = c.from_col + group_col
                curr_position = c.last_position[col]
                curr_price = c.last_val_price[col]
                curr_alloc = curr_position * curr_price / curr_value
                curr_threshold = vbt.pf_nb.select_from_col_nb(c, col, threshold)
                alloc_diff = curr_alloc - group_memory.target_alloc[group_col]
                if abs(alloc_diff) >= curr_threshold:
                    should_rebalance = True
                    break
    if should_rebalance:
        allocate_func_nb(c, group_memory, *args)
        vbt.pf_nb.sort_call_seq_1d_nb(
            c,
            group_memory.target_alloc,
            group_memory.size_type,
            group_memory.direction,
            group_memory.order_value_out
        )
    return group_memory, should_rebalance

@njit
def order_func_nb(
    c,
    group_memory,
    should_rebalance,
    price,
    fees
):
    if not should_rebalance:
        return vbt.pf_nb.order_nothing_nb()
    group_col = c.col - c.from_col
    return vbt.pf_nb.order_nb(
        size=group_memory.target_alloc[group_col],
        price=vbt.pf_nb.select_nb(c, price),
        size_type=group_memory.size_type[group_col],
        direction=group_memory.direction[group_col],
        fees=vbt.pf_nb.select_nb(c, fees)
    )
In [ ]:
@njit
def uniform_allocate_func_nb(c, group_memory):
    for group_col in range(c.group_len):
        group_memory.target_alloc[group_col] = 1 / c.group_len
In [ ]:
def simulate_threshold_rebalancing(threshold, allocate_func_nb, *args, **kwargs):
    return vbt.Portfolio.from_order_func(
        data.get("Close"),
        open=data.get("Open"),
        pre_group_func_nb=pre_group_func_nb,
        pre_group_args=(),
        pre_segment_func_nb=pre_segment_func_nb,
        pre_segment_args=(
            0,
            vbt.Rep("threshold"),
            allocate_func_nb,
            *args
        ),
        order_func_nb=order_func_nb,
        order_args=(vbt.Rep('price'), vbt.Rep('fees')),
        broadcast_named_args=dict(
            price=data.get("Close"),
            fees=0.005,
            threshold=threshold
        ),
        cash_sharing=True,
        group_by=vbt.ExceptLevel("symbol"),
        freq='1h',
        **kwargs
    )

pf = simulate_threshold_rebalancing(0.05, uniform_allocate_func_nb)
pf.plot_allocations().show_svg()
In [ ]:
pf = simulate_threshold_rebalancing(
    vbt.Param(np.arange(1, 16) / 100, name="threshold"),
    uniform_allocate_func_nb
)
pf.sharpe_ratio
Post-analysis
In [ ]:
@njit
def track_uniform_allocate_func_nb(c, group_memory, index_points, alloc_counter):
    for group_col in range(c.group_len):
        group_memory.target_alloc[group_col] = 1 / c.group_len
    index_points[alloc_counter[0]] = c.i
    alloc_counter[0] += 1

index_points = np.empty(data.wrapper.shape[0], dtype=np.int_)
alloc_counter = np.full(1, 0)
pf = simulate_threshold_rebalancing(
    0.05,
    track_uniform_allocate_func_nb,
    index_points,
    alloc_counter
)
index_points = index_points[:alloc_counter[0]]
data.wrapper.index[index_points]
In [ ]:
@njit
def random_allocate_func_nb(
    c,
    group_memory,
    alloc_points,
    alloc_weights,
    alloc_counter
):
    weights = np.random.uniform(0, 1, c.group_len)
    group_memory.target_alloc[:] = weights / weights.sum()
    group_count = alloc_counter[c.group]
    count = alloc_counter.sum()
    alloc_points["id"][count] = group_count
    alloc_points["col"][count] = c.group
    alloc_points["alloc_idx"][count] = c.i
    alloc_weights[count] = group_memory.target_alloc
    alloc_counter[c.group] += 1

thresholds = pd.Index(np.arange(1, 16) / 100, name="threshold")
max_entries = data.wrapper.shape[0] * len(thresholds)
alloc_points = np.empty(max_entries, dtype=vbt.pf_enums.alloc_point_dt)
alloc_weights = np.empty((max_entries, len(data.symbols)), dtype=np.float_)
alloc_counter = np.full(len(thresholds), 0)
pf = simulate_threshold_rebalancing(
    vbt.Param(thresholds),
    random_allocate_func_nb,
    alloc_points,
    alloc_weights,
    alloc_counter,
    seed=42
)
alloc_points = alloc_points[:alloc_counter.sum()]
alloc_weights = alloc_weights[:alloc_counter.sum()]
In [ ]:
@njit
def random_allocate_func_nb(c, group_memory):
    weights = np.random.uniform(0, 1, c.group_len)
    group_memory.target_alloc[:] = weights / weights.sum()
    group_count = c.in_outputs.alloc_counter[c.group]
    count = c.in_outputs.alloc_counter.sum()
    c.in_outputs.alloc_points["id"][count] = group_count
    c.in_outputs.alloc_points["col"][count] = c.group
    c.in_outputs.alloc_points["alloc_idx"][count] = c.i
    c.in_outputs.alloc_weights[count] = group_memory.target_alloc
    c.in_outputs.alloc_counter[c.group] += 1

alloc_points = vbt.RepEval("""
    max_entries = target_shape[0] * len(group_lens)
    np.empty(max_entries, dtype=alloc_point_dt)
""", context=dict(alloc_point_dt=vbt.pf_enums.alloc_point_dt))
alloc_weights = vbt.RepEval("""
    max_entries = target_shape[0] * len(group_lens)
    np.empty((max_entries, n_cols), dtype=np.float_)
""", context=dict(n_cols=len(data.symbols)))
alloc_counter = vbt.RepEval("np.full(len(group_lens), 0)")

InOutputs = namedtuple("InOutputs", [
    "alloc_points",
    "alloc_weights",
    "alloc_counter"
])
in_outputs = InOutputs(
    alloc_points=alloc_points,
    alloc_weights=alloc_weights,
    alloc_counter=alloc_counter,
)

pf = simulate_threshold_rebalancing(
    vbt.Param(np.arange(1, 16) / 100, name="threshold"),
    random_allocate_func_nb,
    in_outputs=in_outputs,
    seed=42
)
alloc_points = pf.in_outputs.alloc_points[:pf.in_outputs.alloc_counter.sum()]
alloc_weights = pf.in_outputs.alloc_weights[:pf.in_outputs.alloc_counter.sum()]
In [ ]:
pfo = vbt.PortfolioOptimizer(
    wrapper=pf.wrapper,
    alloc_records=vbt.AllocPoints(
        pf.wrapper.resolve(),
        alloc_points
    ),
    allocations=alloc_weights
)
In [ ]:
print(pfo[0.1].allocations.describe())
In [ ]:
pfo.plot(column=0.1).show_svg()
In [ ]:
pfo.plot(column=0.03).show_svg()
In [ ]:
pf[0.03].plot_allocations().show_svg()
In [ ]:
pf.sharpe_ratio
In [ ]:
pf_new = vbt.Portfolio.from_optimizer(
    data,
    pfo,
    val_price=data.get("Open"),
    freq="1h",
    fees=0.005
)
pf_new.sharpe_ratio
Bonus 1: Own optimizer
In [ ]:
@njit(nogil=True)
def optimize_portfolio_nb(
    close,
    val_price,
    range_starts,
    range_ends,
    optimize_func_nb,
    optimize_args=(),
    price=np.inf,
    fees=0.,
    init_cash=100.,
    group=0
):
    val_price_ = vbt.to_2d_array_nb(np.asarray(val_price))
    price_ = vbt.to_2d_array_nb(np.asarray(price))
    fees_ = vbt.to_2d_array_nb(np.asarray(fees))

    order_records = np.empty(close.shape, dtype=vbt.pf_enums.order_dt)
    order_counts = np.full(close.shape[1], 0, dtype=np.int_)

    order_value = np.empty(close.shape[1], dtype=np.float_)
    call_seq = np.empty(close.shape[1], dtype=np.int_)
    last_position = np.full(close.shape[1], 0.0, dtype=np.float_)
    last_debt = np.full(close.shape[1], 0.0, dtype=np.float_)
    last_locked_cash = np.full(close.shape[1], 0.0, dtype=np.float_)
    cash_now = float(init_cash)
    free_cash_now = float(init_cash)
    value_now = float(init_cash)

    for k in range(len(range_starts)):
        i = range_ends[k]

        size = optimize_func_nb(
            range_starts[k],
            range_ends[k],
            *optimize_args
        )

        value_now = cash_now
        for col in range(close.shape[1]):
            val_price_now = vbt.flex_select_nb(val_price_, i, col)
            value_now += last_position[col] * val_price_now

        for col in range(close.shape[1]):
            val_price_now = vbt.flex_select_nb(val_price_, i, col)
            exec_state = vbt.pf_enums.ExecState(
                cash=cash_now,
                position=last_position[col],
                debt=last_debt[col],
                locked_cash=last_locked_cash[col],
                free_cash=free_cash_now,
                val_price=val_price_now,
                value=value_now,
            )
            order_value[col] = vbt.pf_nb.approx_order_value_nb(
                exec_state,
                size[col],
                vbt.pf_enums.SizeType.TargetPercent,
                vbt.pf_enums.Direction.Both,
            )
            call_seq[col] = col

        vbt.pf_nb.insert_argsort_nb(order_value, call_seq)

        for c in range(close.shape[1]):
            col = call_seq[c]
            order = vbt.pf_nb.order_nb(
                size=size[col],
                price=vbt.flex_select_nb(price_, i, col),
                size_type=vbt.pf_enums.SizeType.TargetPercent,
                direction=vbt.pf_enums.Direction.Both,
                fees=vbt.flex_select_nb(fees_, i, col),
            )
            price_area = vbt.pf_enums.PriceArea(
                open=np.nan,
                high=np.nan,
                low=np.nan,
                close=vbt.flex_select_nb(close, i, col),
            )
            val_price_now = vbt.flex_select_nb(val_price_, i, col)
            exec_state = vbt.pf_enums.ExecState(
                cash=cash_now,
                position=last_position[col],
                debt=last_debt[col],
                locked_cash=last_locked_cash[col],
                free_cash=free_cash_now,
                val_price=val_price_now,
                value=value_now,
            )
            _, new_exec_state = vbt.pf_nb.process_order_nb(
                group=group,
                col=col,
                i=i,
                exec_state=exec_state,
                order=order,
                price_area=price_area,
                order_records=order_records,
                order_counts=order_counts
            )
            cash_now = new_exec_state.cash
            free_cash_now = new_exec_state.free_cash
            value_now = new_exec_state.value
            last_position[col] = new_exec_state.position
            last_debt[col] = new_exec_state.debt
            last_locked_cash[col] = new_exec_state.locked_cash

    return vbt.nb.repartition_nb(order_records, order_counts)
In [ ]:
@njit(nogil=True)
def sharpe_optimize_func_nb(
    start_idx,
    end_idx,
    close,
    num_tests,
    ann_factor
):
    close_period = close[start_idx:end_idx]
    returns = (close_period[1:] - close_period[:-1]) / close_period[:-1]
    mean = vbt.nb.nanmean_nb(returns)
    cov = np.cov(returns, rowvar=False)

    best_sharpe_ratio = -np.inf
    weights = np.full(close.shape[1], np.nan, dtype=np.float_)

    for i in range(num_tests):
        w = np.random.random_sample(close.shape[1])
        w = w / np.sum(w)
        p_return = np.sum(mean * w) * ann_factor
        p_std = np.sqrt(np.dot(w.T, np.dot(cov, w))) * np.sqrt(ann_factor)
        sharpe_ratio = p_return / p_std
        if sharpe_ratio > best_sharpe_ratio:
            best_sharpe_ratio = sharpe_ratio
            weights = w

    return weights
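For each random weight vector $w$, the function computes an annualized Sharpe ratio $\mathrm{SR} = a\,\mu^\top w \,/\, \left(\sqrt{a}\,\sqrt{w^\top \Sigma w}\right)$, where $\mu$ and $\Sigma$ are the per-bar mean vector and covariance matrix of simple returns and $a$ is the annualization factor, and keeps the weights that achieve the highest value.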
In [ ]:
range_starts, range_ends = data.wrapper.get_index_ranges(every="W")

ann_factor = vbt.timedelta("365d") / vbt.timedelta("1h")
init_cash = 100
num_tests = 30
fees = 0.005

order_records = optimize_portfolio_nb(
    data.get("Close").values,
    data.get("Open").values,
    range_starts,
    range_ends,
    sharpe_optimize_func_nb,
    optimize_args=(data.get("Close").values, num_tests, ann_factor),
    fees=fees,
    init_cash=init_cash
)
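Since the bars are hourly, the annualization factor is simply the number of hours in a 365-day year; a quick arithmetic check:
In [ ]:
365 * 24  # = 8760, the same value as vbt.timedelta("365d") / vbt.timedelta("1h")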
In [ ]:
pf = vbt.Portfolio(
    wrapper=symbol_wrapper.regroup(True),
    close=data.get("Close"),
    order_records=order_records,
    log_records=np.array([]),
    cash_sharing=True,
    init_cash=init_cash
)
In [ ]:
pf.plot_allocations().show_svg()
Bonus 2: Parameterization
In [ ]:
def merge_func(order_records_list, param_index):
    sharpe_ratios = pd.Series(index=param_index, dtype=np.float_)
    for i, order_records in enumerate(order_records_list):
        pf = vbt.Portfolio(
            wrapper=symbol_wrapper.regroup(True),
            close=data.get("Close"),
            order_records=order_records,
            cash_sharing=True,
            init_cash=init_cash
        )
        sharpe_ratios.iloc[i] = pf.sharpe_ratio
    return sharpe_ratios
In [ ]:
param_optimize_portfolio_nb = vbt.parameterized(
    optimize_portfolio_nb,
    merge_func=merge_func,
    merge_kwargs=dict(param_index=vbt.Rep("param_index")),
    engine="dask",
    chunk_len=4
)
In [ ]:
every_index = pd.Index(["D", "W", "M"], name="every")
num_tests_index = pd.Index([30, 50, 100], name="num_tests")
fees_index = pd.Index([0.0, 0.005, 0.01], name="fees")

range_starts = []
range_ends = []
for every in every_index:
    index_ranges = symbol_wrapper.get_index_ranges(every=every)
    range_starts.append(index_ranges[0])
    range_ends.append(index_ranges[1])
num_tests = num_tests_index.tolist()

range_starts = vbt.Param(range_starts, level=0, keys=every_index)
range_ends = vbt.Param(range_ends, level=0, keys=every_index)
num_tests = vbt.Param(num_tests, level=1, keys=num_tests_index)
fees = vbt.Param(fees_index.values, level=2, keys=fees_index)
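Because the three parameters occupy distinct levels (with `range_starts` and `range_ends` sharing level 0), they combine into a full Cartesian product; a quick check of the number of configurations, a sanity check that isn't part of the original flow:
In [ ]:
len(every_index) * len(num_tests_index) * len(fees_index)  # 27 parameter combinations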
In [ ]:
sharpe_ratios = param_optimize_portfolio_nb(
    data.get("Close").values,
    data.get("Open").values,
    range_starts,
    range_ends,
    sharpe_optimize_func_nb,
    optimize_args=(
        data.get("Close").values,
        num_tests,
        ann_factor
    ),
    fees=fees,
    init_cash=init_cash,
    group=vbt.Rep("config_idx")
)
In [ ]:
sharpe_ratios
Bonus 3: Hyperopt
In [ ]:
def objective(kwargs):
    close_values = data.get("Close").values
    open_values = data.get("Open").values
    index_ranges = symbol_wrapper.get_index_ranges(every=kwargs["every"])
    order_records = optimize_portfolio_nb(
        close_values,
        open_values,
        index_ranges[0],
        index_ranges[1],
        sharpe_optimize_func_nb,
        optimize_args=(close_values, kwargs["num_tests"], ann_factor),
        fees=vbt.to_2d_array(kwargs["fees"]),
        init_cash=init_cash
    )
    pf = vbt.Portfolio(
        wrapper=symbol_wrapper.regroup(True),
        close=data.get("Close"),
        order_records=order_records,
        log_records=np.array([]),
        cash_sharing=True,
        init_cash=init_cash
    )
    return -pf.sharpe_ratio
In [ ]:
from hyperopt import fmin, tpe, hp

space = {
    "every": hp.choice("every", ["%dD" % n for n in range(1, 100)]),
    "num_tests": hp.quniform("num_tests", 5, 100, 1),
    "fees": hp.uniform('fees', 0, 0.05)
}
best = fmin(
    fn=objective,
    space=space,
    algo=tpe.suggest,
    max_evals=30
)
best
Bonus 4: Hybrid
In [ ]:
def optimize_func(
    data,
    index_slice,
    temp_allocations,
    temp_pfs,
    threshold
):
    sub_data = data.iloc[index_slice]
    if len(temp_allocations) > 0:
        prev_allocation = sub_data.symbol_wrapper.wrap(
            [temp_allocations[-1]],
            index=sub_data.wrapper.index[[0]]
        )
        prev_pfo = vbt.PortfolioOptimizer.from_allocations(
            sub_data.symbol_wrapper,
            prev_allocation
        )
        if len(temp_pfs) > 0:
            init_cash = temp_pfs[-1].cash.iloc[-1]
            init_position = temp_pfs[-1].assets.iloc[-1]
            init_price = temp_pfs[-1].close.iloc[-1]
        else:
            init_cash = 100.
            init_position = 0.
            init_price = np.nan
        prev_pf = prev_pfo.simulate(
            sub_data,
            init_cash=init_cash,
            init_position=init_position,
            init_price=init_price
        )
        temp_pfs.append(prev_pf)
        should_rebalance = False
        curr_alloc = prev_pf.allocations.iloc[-1].values
        if (np.abs(curr_alloc - temp_allocations[-1]) >= threshold).any():
            should_rebalance = True
    else:
        should_rebalance = True
    n_symbols = len(sub_data.symbols)
    if should_rebalance:
        new_alloc = np.full(n_symbols, 1 / n_symbols)
    else:
        new_alloc = np.full(n_symbols, np.nan)
    temp_allocations.append(new_alloc)
    return new_alloc

pfs = []
allocations = []
pfopt = vbt.PortfolioOptimizer.from_optimize_func(
    data.symbol_wrapper,
    optimize_func,
    data,
    vbt.Rep("index_slice"),
    allocations,
    pfs,
    0.03,
    every="W"
)
pf = pfopt.simulate(data)
In [ ]:
final_values = pd.concat(map(lambda x: x.value[[-1]], pfs))
final_values
In [ ]:
pd.testing.assert_series_equal(
    final_values,
    pf.value.loc[final_values.index],
)