Initial strategy32 research and live runtime
This commit is contained in:
693
research/adverse_regime.py
Normal file
693
research/adverse_regime.py
Normal file
@@ -0,0 +1,693 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import asdict, dataclass
|
||||
import pickle
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from strategy29.backtest.metrics import cagr, max_drawdown, sharpe_ratio
|
||||
from strategy29.common.constants import BTC_SYMBOL
|
||||
from strategy29.common.models import MarketDataBundle
|
||||
from strategy29.data.universe import select_tradeable_universe
|
||||
from strategy32.scripts.run_regime_filter_analysis import build_strategic_regime_frame
|
||||
|
||||
|
||||
@dataclass(slots=True)
class AdverseRegimeEngineSpec:
    """Configuration for one adverse-regime engine variant.

    An engine trades only while the strategic regime equals
    ``target_regime``; ``family`` selects the signal logic via
    ``AdverseRegimeResearchHarness._target_weights``.
    """

    name: str  # unique engine identifier, echoed into result payloads
    target_regime: str  # strategic regime label this engine is active in
    family: str  # dispatch key for the weight-generation method
    min_avg_dollar_volume: float = 50_000_000.0  # liquidity floor for the symbol universe
    rebalance_bars: int = 6  # bars between weight refreshes while in-regime
    top_n: int = 2  # max symbols selected per side of the book
    transaction_cost_pct: float = 0.0015  # cost charged per unit of turnover
|
||||
|
||||
|
||||
@dataclass(slots=True)
|
||||
class AdverseRegimeEngineResult:
|
||||
name: str
|
||||
target_regime: str
|
||||
family: str
|
||||
total_return: float
|
||||
cagr: float
|
||||
sharpe: float
|
||||
max_drawdown: float
|
||||
active_bar_ratio: float
|
||||
rebalance_count: int
|
||||
equity_curve: pd.Series
|
||||
|
||||
def to_payload(self) -> dict[str, object]:
|
||||
return {
|
||||
"name": self.name,
|
||||
"target_regime": self.target_regime,
|
||||
"family": self.family,
|
||||
"total_return": self.total_return,
|
||||
"cagr": self.cagr,
|
||||
"sharpe": self.sharpe,
|
||||
"max_drawdown": self.max_drawdown,
|
||||
"active_bar_ratio": self.active_bar_ratio,
|
||||
"rebalance_count": self.rebalance_count,
|
||||
}
|
||||
|
||||
|
||||
def default_engine_specs() -> list[AdverseRegimeEngineSpec]:
    """Return the canonical engine roster for the adverse-regime search.

    Each roster row is ``(name, target_regime, family, rebalance_bars,
    top_n)``; all remaining spec fields keep their dataclass defaults.
    """
    roster: list[tuple[str, str, str, int, int]] = [
        ("cap_cash", "CAPITULATION_STRESS", "cash", 1, 2),
        ("cap_btc_rebound", "CAPITULATION_STRESS", "btc_rebound", 1, 2),
        ("cap_alt_panic_rebound", "CAPITULATION_STRESS", "alt_panic_rebound", 1, 2),
        ("cap_funding_snapback_hedged", "CAPITULATION_STRESS", "funding_snapback_hedged", 1, 2),
        ("chop_cash", "CHOPPY_ROTATION", "cash", 1, 2),
        ("chop_pairs_mean_revert", "CHOPPY_ROTATION", "pairs_mean_revert", 3, 2),
        ("chop_quality_rotation", "CHOPPY_ROTATION", "quality_rotation", 3, 2),
        ("chop_carry_only", "CHOPPY_ROTATION", "carry_only", 6, 2),
        ("chop_rs_spread", "CHOPPY_ROTATION", "rs_spread", 6, 2),
        ("chop_btc_hedged_leader", "CHOPPY_ROTATION", "btc_hedged_leader", 6, 1),
        ("chop_carry_strict", "CHOPPY_ROTATION", "carry_only_strict", 6, 1),
        ("chop_inverse_carry", "CHOPPY_ROTATION", "inverse_carry", 6, 1),
        ("chop_inverse_carry_strict", "CHOPPY_ROTATION", "inverse_carry_strict", 6, 1),
        ("dist_cash", "DISTRIBUTION_DRIFT", "cash", 1, 2),
        ("dist_btc_vs_weak_alt", "DISTRIBUTION_DRIFT", "btc_vs_weak_alt", 3, 2),
        ("dist_short_rally", "DISTRIBUTION_DRIFT", "short_rally", 1, 2),
        ("dist_weak_basket_short", "DISTRIBUTION_DRIFT", "weak_basket_short", 3, 2),
        ("dist_relative_weakness_spread", "DISTRIBUTION_DRIFT", "relative_weakness_spread", 6, 2),
        ("dist_btc_rally_short", "DISTRIBUTION_DRIFT", "btc_rally_short", 1, 1),
        ("dist_btc_rally_short_strict", "DISTRIBUTION_DRIFT", "btc_rally_short_strict", 1, 1),
        ("dist_weak_rally_spread", "DISTRIBUTION_DRIFT", "weak_rally_spread", 3, 2),
        ("dist_inverse_carry", "DISTRIBUTION_DRIFT", "inverse_carry", 6, 1),
        ("dist_inverse_carry_strict", "DISTRIBUTION_DRIFT", "inverse_carry_strict", 6, 1),
    ]
    return [
        AdverseRegimeEngineSpec(name, regime, family, rebalance_bars=bars, top_n=top)
        for name, regime, family, bars, top in roster
    ]
|
||||
|
||||
|
||||
def load_fixed66_cache(path: str | Path) -> tuple[MarketDataBundle, pd.Timestamp, list[str]]:
    """Load the pickled fixed-66 research cache.

    Returns the ``(bundle, latest_bar, accepted_symbols)`` triple stored
    in the cache file.  NOTE: pickle is only safe on trusted, locally
    produced cache files — never point this at untrusted input.
    """
    with Path(path).open("rb") as handle:
        payload = pickle.load(handle)
    symbols = list(payload["accepted_symbols"])
    return payload["bundle"], payload["latest_bar"], symbols
|
||||
|
||||
|
||||
class AdverseRegimeResearchHarness:
    """Backtest harness for single-regime "adverse" engines.

    Wraps a ``MarketDataBundle`` and replays one ``AdverseRegimeEngineSpec``
    bar by bar: the engine holds positions only while the strategic regime
    equals ``spec.target_regime``, refreshes weights every
    ``spec.rebalance_bars`` bars, and pays ``spec.transaction_cost_pct``
    per unit of turnover.

    NOTE(review): bar counts used throughout (6 == "1d", 18 == "3d",
    42 == "7d") imply a 4-hour bar / 6 bars per day — confirm against the
    bundle's actual bar interval.
    """

    def __init__(self, bundle: MarketDataBundle, latest_bar: pd.Timestamp):
        self.bundle = bundle
        self.latest_bar = pd.Timestamp(latest_bar)
        # The simulation timeline is BTC's bar timestamps.
        # NOTE(review): assumes BTC history spans the full evaluation window.
        self.timestamps = sorted(bundle.prices[BTC_SYMBOL]["timestamp"].tolist())
        # Memoizes build_regime_frame results, keyed by eval_start.
        self._regime_frame_cache: dict[pd.Timestamp, pd.DataFrame] = {}
        # Per-symbol close/volume frames indexed by timestamp for fast label slicing.
        self.price_frames = {
            symbol: df.set_index("timestamp")[["close", "volume"]].sort_index()
            for symbol, df in bundle.prices.items()
        }
        # Per-symbol funding_rate/basis frames, same indexing scheme.
        self.funding_frames = {
            symbol: df.set_index("timestamp")[["funding_rate", "basis"]].sort_index()
            for symbol, df in bundle.funding.items()
        }

    def build_regime_frame(self, eval_start: pd.Timestamp) -> pd.DataFrame:
        """Build (and cache) the strategic-regime frame from ``eval_start``.

        The bundle is sliced to include 90 days of warm-up history before
        ``eval_start`` so the regime indicators have context, then handed
        to ``build_strategic_regime_frame``.
        """
        eval_start = pd.Timestamp(eval_start)
        cached = self._regime_frame_cache.get(eval_start)
        if cached is not None:
            return cached
        raw_start = eval_start - pd.Timedelta(days=90)
        sliced = MarketDataBundle(
            prices={symbol: df.loc[df["timestamp"] >= raw_start].copy() for symbol, df in self.bundle.prices.items()},
            funding={symbol: df.loc[df["timestamp"] >= raw_start].copy() for symbol, df in self.bundle.funding.items()},
        )
        frame = build_strategic_regime_frame(sliced, eval_start, self.latest_bar)
        self._regime_frame_cache[eval_start] = frame
        return frame

    def run_engine(
        self,
        spec: AdverseRegimeEngineSpec,
        *,
        eval_start: pd.Timestamp,
        initial_capital: float = 1000.0,
        regime_frame: pd.DataFrame | None = None,
    ) -> AdverseRegimeEngineResult:
        """Simulate one engine spec bar by bar and return summary metrics.

        Signals are computed at bar ``i-1`` and the resulting weights earn
        the ``i-1 -> i`` bar return (no look-ahead).  Positions are flattened
        whenever the regime at the signal bar differs from
        ``spec.target_regime``.  Raises ``ValueError`` when fewer than 3
        bars remain after ``eval_start``.
        """
        regime_frame = self.build_regime_frame(eval_start) if regime_frame is None else regime_frame
        # timestamp -> strategic_regime lookup for the evaluation window.
        regime_map = dict(zip(pd.to_datetime(regime_frame["timestamp"]), regime_frame["strategic_regime"]))
        timestamps = [ts for ts in self.timestamps if ts >= eval_start]
        if len(timestamps) < 3:
            raise ValueError("not enough timestamps for adverse regime simulation")

        equity = initial_capital
        equity_points = [pd.Timestamp(timestamps[0])]
        equity_values = [equity]
        current_weights: dict[str, float] = {}
        rebalance_count = 0
        active_bars = 0

        for i in range(1, len(timestamps)):
            signal_ts = timestamps[i - 1]
            execution_ts = timestamps[i]

            # Accrue the bar return on whatever was held over this bar.
            # Equity is floored at 0 (no debt past a full wipe-out).
            if current_weights:
                bar_ret = self._portfolio_return(current_weights, signal_ts, execution_ts)
                equity *= max(0.0, 1.0 + bar_ret)

            target_weights = current_weights
            regime_name = regime_map.get(signal_ts, "")
            if regime_name != spec.target_regime:
                # Out of regime: flatten everything.
                target_weights = {}
            elif (i - 1) % spec.rebalance_bars == 0:
                target_weights = self._target_weights(spec, signal_ts)
                # NOTE(review): active_bars only increments on rebalance bars
                # that produce positions, so active_bar_ratio undercounts
                # bars merely *holding* between rebalances — confirm intended.
                if target_weights:
                    active_bars += 1

            turnover = self._turnover(current_weights, target_weights)
            if turnover > 0:
                rebalance_count += 1
                # Transaction cost charged as an equity haircut per unit turnover.
                equity *= max(0.0, 1.0 - turnover * spec.transaction_cost_pct)
                current_weights = target_weights

            equity_points.append(pd.Timestamp(execution_ts))
            equity_values.append(equity)

        equity_curve = pd.Series(equity_values, index=pd.Index(equity_points, name="timestamp"), dtype=float)
        return AdverseRegimeEngineResult(
            name=spec.name,
            target_regime=spec.target_regime,
            family=spec.family,
            total_return=equity_curve.iloc[-1] / equity_curve.iloc[0] - 1.0,
            cagr=cagr(equity_curve),
            # 6 presumably = bars per day for annualization — TODO confirm
            # sharpe_ratio's second-argument semantics.
            sharpe=sharpe_ratio(equity_curve, 6),
            max_drawdown=max_drawdown(equity_curve),
            active_bar_ratio=active_bars / max(len(timestamps) - 1, 1),
            rebalance_count=rebalance_count,
            equity_curve=equity_curve,
        )

    def target_weights(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
        """Public wrapper around ``_target_weights`` (normalizes the timestamp)."""
        return self._target_weights(spec, pd.Timestamp(timestamp))

    def _target_weights(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
        """Dispatch ``spec.family`` to its signal method; raise on unknown family.

        Returns a symbol -> weight map; an empty dict means "hold cash".
        Synthetic legs use prefixed keys ("carry:SYM", "inverse_carry:SYM").
        """
        if spec.family == "cash":
            return {}
        if spec.family == "btc_rebound":
            return self._cap_btc_rebound(timestamp)
        if spec.family == "alt_panic_rebound":
            return self._cap_alt_panic_rebound(spec, timestamp)
        if spec.family == "funding_snapback_hedged":
            return self._cap_funding_snapback_hedged(spec, timestamp)
        if spec.family == "pairs_mean_revert":
            return self._chop_pairs_mean_revert(spec, timestamp)
        if spec.family == "quality_rotation":
            return self._chop_quality_rotation(spec, timestamp)
        if spec.family == "carry_only":
            return self._carry_only(spec, timestamp)
        if spec.family == "rs_spread":
            return self._chop_rs_spread(spec, timestamp)
        if spec.family == "btc_hedged_leader":
            return self._chop_btc_hedged_leader(spec, timestamp)
        if spec.family == "carry_only_strict":
            return self._carry_only_strict(spec, timestamp)
        if spec.family == "inverse_carry":
            return self._inverse_carry(spec, timestamp, strict=False)
        if spec.family == "inverse_carry_strict":
            return self._inverse_carry(spec, timestamp, strict=True)
        if spec.family == "btc_vs_weak_alt":
            return self._dist_btc_vs_weak_alt(spec, timestamp)
        if spec.family == "short_rally":
            return self._dist_short_rally(spec, timestamp)
        if spec.family == "weak_basket_short":
            return self._dist_weak_basket_short(spec, timestamp)
        if spec.family == "relative_weakness_spread":
            return self._dist_relative_weakness_spread(spec, timestamp)
        if spec.family == "btc_rally_short":
            return self._dist_btc_rally_short(timestamp)
        if spec.family == "btc_rally_short_strict":
            return self._dist_btc_rally_short_strict(timestamp)
        if spec.family == "weak_rally_spread":
            return self._dist_weak_rally_spread(spec, timestamp)
        raise ValueError(f"unsupported family: {spec.family}")

    def _cap_btc_rebound(self, timestamp: pd.Timestamp) -> dict[str, float]:
        """100% long BTC after a capitulation flush that has just ticked up.

        Requires an 18-bar return of -10% or worse AND a positive last-bar
        return; otherwise stays in cash.
        """
        hist = self._price_hist(BTC_SYMBOL, timestamp, 24)
        if len(hist) < 19:
            return {}
        ret_3d = self._return_from_hist(hist, 18)
        ret_1b = self._return_from_hist(hist, 1)
        if ret_3d > -0.10 or ret_1b <= 0.0:
            return {}
        return {BTC_SYMBOL: 1.0}

    def _cap_alt_panic_rebound(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
        """Equal-weight long the hardest-crashed alts that are bouncing.

        Candidates need an 18-bar return of -12% or worse and a positive
        last bar; scoring favors deeper crashes, more negative funding
        (x200 scaling), and a stronger bounce.
        """
        candidates: list[tuple[float, str]] = []
        for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
            hist = self._price_hist(symbol, timestamp, 24)
            if len(hist) < 19:
                continue
            ret_3d = self._return_from_hist(hist, 18)
            ret_1b = self._return_from_hist(hist, 1)
            funding = self._latest_funding(symbol, timestamp)
            if ret_3d > -0.12 or ret_1b <= 0.0:
                continue
            # Empirical score weighting; the 200.0 funding multiplier is a tuning constant.
            score = (-ret_3d) + max(-funding, 0.0) * 200.0 + ret_1b * 2.0
            candidates.append((score, symbol))
        candidates.sort(reverse=True)
        symbols = [symbol for _, symbol in candidates[: spec.top_n]]
        return self._equal_weight(symbols, 1.0)

    def _cap_funding_snapback_hedged(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
        """Long crashed, negative-funding alts (70% gross) hedged 30% short BTC.

        Candidates require negative latest funding, an 18-bar return of
        -8% or worse, and a positive last bar.
        """
        candidates: list[tuple[float, str]] = []
        for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
            hist = self._price_hist(symbol, timestamp, 24)
            if len(hist) < 19:
                continue
            ret_3d = self._return_from_hist(hist, 18)
            ret_1b = self._return_from_hist(hist, 1)
            funding = self._latest_funding(symbol, timestamp)
            if funding >= 0.0 or ret_3d > -0.08 or ret_1b <= 0.0:
                continue
            score = max(-funding, 0.0) * 260.0 + (-ret_3d) * 0.6 + ret_1b
            candidates.append((score, symbol))
        candidates.sort(reverse=True)
        symbols = [symbol for _, symbol in candidates[: spec.top_n]]
        if not symbols:
            return {}
        weights = self._equal_weight(symbols, 0.70)
        # BTC hedge; .get() guards against BTC already holding a long weight.
        weights[BTC_SYMBOL] = weights.get(BTC_SYMBOL, 0.0) - 0.30
        return weights

    def _chop_pairs_mean_revert(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
        """Mean-reversion pairs: long the worst 1-day movers, short the best.

        Only low-volatility names qualify (12-bar close-to-close std <= 8%).
        Requires at least 2 * top_n candidates so both sides can be filled.
        """
        rows: list[tuple[float, float, str]] = []
        for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
            hist = self._price_hist(symbol, timestamp, 18)
            if len(hist) < 7:
                continue
            ret_1d = self._return_from_hist(hist, 6)
            vol = float(hist["close"].pct_change().dropna().tail(12).std(ddof=0))
            if vol <= 0 or vol > 0.08:
                continue
            rows.append((ret_1d, vol, symbol))
        if len(rows) < spec.top_n * 2:
            return {}
        rows.sort(key=lambda row: row[0])  # ascending by 1-day return
        longs = [symbol for _, _, symbol in rows[: spec.top_n]]
        shorts = [symbol for _, _, symbol in rows[-spec.top_n :]]
        return self._long_short_weights(longs, shorts)

    def _chop_quality_rotation(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
        """Long strongest / short weakest 7-day relative strength vs BTC.

        Each side's score penalizes the recent 1-day move (fading short-term
        spikes).  Symbols picked long are removed from the short list.
        """
        long_rows: list[tuple[float, str]] = []
        short_rows: list[tuple[float, str]] = []
        btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
        if len(btc_hist) < 43:
            return {}
        btc_ret_7d = self._return_from_hist(btc_hist, 42)
        for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
            hist = self._price_hist(symbol, timestamp, 48)
            if len(hist) < 43:
                continue
            ret_7d = self._return_from_hist(hist, 42)
            ret_1d = self._return_from_hist(hist, 6)
            rs_7d = ret_7d - btc_ret_7d
            long_rows.append((rs_7d - ret_1d, symbol))
            short_rows.append((-rs_7d + ret_1d, symbol))
        long_rows.sort(reverse=True)
        short_rows.sort(reverse=True)
        longs = [symbol for _, symbol in long_rows[: spec.top_n]]
        shorts = [symbol for _, symbol in short_rows[: spec.top_n]]
        shorts = [symbol for symbol in shorts if symbol not in longs]
        return self._long_short_weights(longs, shorts[: spec.top_n])

    def _carry_only(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
        """Synthetic carry book over positive-funding symbols.

        Edge = 21-sample mean funding * 18 + 0.35 * positive basis
        - 0.0030 cost buffer - 1.5 * basis volatility; only positive-edge
        symbols qualify.  Positions are keyed "carry:<symbol>" so
        ``_portfolio_return`` routes them to ``_carry_return``.
        """
        candidates: list[tuple[float, str]] = []
        for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
            if symbol not in self.funding_frames:
                continue
            f_hist = self.funding_frames[symbol].loc[:timestamp].tail(21)
            if len(f_hist) < 21:
                continue
            mean_funding = float(f_hist["funding_rate"].mean())
            basis_vol = float(f_hist["basis"].std(ddof=0))
            latest_basis = float(f_hist["basis"].iloc[-1])
            expected_edge = mean_funding * 18 + max(latest_basis, 0.0) * 0.35 - 0.0030 - basis_vol * 1.5
            if expected_edge <= 0:
                continue
            candidates.append((expected_edge, symbol))
        candidates.sort(reverse=True)
        symbols = [symbol for _, symbol in candidates[: spec.top_n]]
        if not symbols:
            return {}
        weight = 1.0 / len(symbols)
        return {f"carry:{symbol}": weight for symbol in symbols}

    def _carry_only_strict(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
        """Stricter variant of ``_carry_only``.

        Same edge formula, but requires edge > 0.004 AND funding positive
        in at least 75% of the last 21 samples.
        """
        candidates: list[tuple[float, str]] = []
        for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
            if symbol not in self.funding_frames:
                continue
            f_hist = self.funding_frames[symbol].loc[:timestamp].tail(21)
            if len(f_hist) < 21:
                continue
            mean_funding = float(f_hist["funding_rate"].mean())
            basis_vol = float(f_hist["basis"].std(ddof=0))
            latest_basis = float(f_hist["basis"].iloc[-1])
            positive_ratio = float((f_hist["funding_rate"] > 0).mean())
            expected_edge = mean_funding * 18 + max(latest_basis, 0.0) * 0.35 - 0.0030 - basis_vol * 1.5
            if expected_edge <= 0.004 or positive_ratio < 0.75:
                continue
            candidates.append((expected_edge, symbol))
        candidates.sort(reverse=True)
        symbols = [symbol for _, symbol in candidates[: spec.top_n]]
        if not symbols:
            return {}
        return {f"carry:{symbol}": 1.0 / len(symbols) for symbol in symbols}

    def _inverse_carry(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp, *, strict: bool) -> dict[str, float]:
        """Mirror of the carry book for persistently negative funding.

        ``strict`` tightens the edge floor (0.004 vs 0.001) and the
        negative-funding sample ratio (75% vs 60%).  Positions are keyed
        "inverse_carry:<symbol>".
        """
        candidates: list[tuple[float, str]] = []
        min_edge = 0.004 if strict else 0.001
        min_negative_ratio = 0.75 if strict else 0.60
        for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
            if symbol not in self.funding_frames:
                continue
            f_hist = self.funding_frames[symbol].loc[:timestamp].tail(21)
            if len(f_hist) < 21:
                continue
            mean_funding = float(f_hist["funding_rate"].mean())
            negative_ratio = float((f_hist["funding_rate"] < 0).mean())
            basis_vol = float(f_hist["basis"].std(ddof=0))
            latest_basis = float(f_hist["basis"].iloc[-1])
            expected_edge = (-mean_funding) * 18 + max(-latest_basis, 0.0) * 0.35 - 0.0030 - basis_vol * 1.5
            if mean_funding >= 0 or negative_ratio < min_negative_ratio or expected_edge <= min_edge:
                continue
            candidates.append((expected_edge, symbol))
        candidates.sort(reverse=True)
        symbols = [symbol for _, symbol in candidates[: spec.top_n]]
        if not symbols:
            return {}
        return {f"inverse_carry:{symbol}": 1.0 / len(symbols) for symbol in symbols}

    def _chop_rs_spread(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
        """7-day relative-strength spread: long top ranks, short bottom ranks.

        Score is RS vs BTC minus a 25% penalty on the absolute 1-day move;
        only names with 12-bar volatility in (0, 10%] qualify.
        """
        rows: list[tuple[float, str]] = []
        btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
        if len(btc_hist) < 43:
            return {}
        btc_ret_7d = self._return_from_hist(btc_hist, 42)
        for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
            hist = self._price_hist(symbol, timestamp, 48)
            if len(hist) < 43:
                continue
            ret_7d = self._return_from_hist(hist, 42)
            ret_1d = self._return_from_hist(hist, 6)
            vol = float(hist["close"].pct_change().dropna().tail(12).std(ddof=0))
            if vol <= 0 or vol > 0.10:
                continue
            rows.append((ret_7d - btc_ret_7d - abs(ret_1d) * 0.25, symbol))
        if len(rows) < spec.top_n * 2:
            return {}
        rows.sort(reverse=True)
        longs = [symbol for _, symbol in rows[: spec.top_n]]
        shorts = [symbol for _, symbol in rows[-spec.top_n :]]
        return self._long_short_weights(longs, shorts)

    def _chop_btc_hedged_leader(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
        """70% long the single strongest RS leader, 30% short BTC as hedge.

        The leader score penalizes positive funding (x80) and short-term
        churn; an empty book is returned when no leader scores positive.
        """
        btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
        if len(btc_hist) < 43:
            return {}
        btc_ret_7d = self._return_from_hist(btc_hist, 42)
        rows: list[tuple[float, str]] = []
        for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
            hist = self._price_hist(symbol, timestamp, 48)
            if len(hist) < 43:
                continue
            ret_7d = self._return_from_hist(hist, 42)
            ret_1d = self._return_from_hist(hist, 6)
            funding = self._latest_funding(symbol, timestamp)
            rows.append((ret_7d - btc_ret_7d - max(funding, 0.0) * 80.0 - abs(ret_1d) * 0.15, symbol))
        rows.sort(reverse=True)
        if not rows or rows[0][0] <= 0:
            return {}
        leader = rows[0][1]
        return {leader: 0.70, BTC_SYMBOL: -0.30}

    def _dist_btc_vs_weak_alt(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
        """40% long BTC vs a 60% short basket of the weakest alts.

        Weakness = 7-day RS vs BTC plus the 18-bar return (most negative
        combined score is shorted first).
        """
        btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
        if len(btc_hist) < 43:
            return {}
        btc_ret_7d = self._return_from_hist(btc_hist, 42)
        weak_rows: list[tuple[float, str]] = []
        for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
            hist = self._price_hist(symbol, timestamp, 48)
            if len(hist) < 43:
                continue
            ret_7d = self._return_from_hist(hist, 42)
            ret_3d = self._return_from_hist(hist, 18)
            weak_rows.append((ret_7d - btc_ret_7d + ret_3d, symbol))
        weak_rows.sort()  # ascending: weakest first
        shorts = [symbol for _, symbol in weak_rows[: spec.top_n]]
        if not shorts:
            return {}
        weights = {BTC_SYMBOL: 0.40}
        short_weight = -0.60 / len(shorts)
        for symbol in shorts:
            weights[symbol] = short_weight
        return weights

    def _dist_short_rally(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
        """Short relatively-weak names that are bouncing below trend.

        Entry requires RS vs BTC <= -3%, a positive 2-bar bounce, and price
        below its EMA20.  NOTE(review): hist is capped at 36 bars, so the
        ``42 if len(hist) > 42`` branch can never trigger and ret_7d is
        actually computed over at most len(hist)-1 bars — confirm intended.
        """
        rows: list[tuple[float, str]] = []
        btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
        if len(btc_hist) < 43:
            return {}
        btc_ret_7d = self._return_from_hist(btc_hist, 42)
        for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
            hist = self._price_hist(symbol, timestamp, 36)
            if len(hist) < 25:
                continue
            ret_7d = self._return_from_hist(hist, 42 if len(hist) > 42 else len(hist) - 1)
            ret_2b = self._return_from_hist(hist, 2)
            ema20 = hist["close"].ewm(span=20, adjust=False).mean().iloc[-1]
            close = float(hist["close"].iloc[-1])
            rs = ret_7d - btc_ret_7d
            if rs >= -0.03 or ret_2b <= 0.0 or close >= float(ema20):
                continue
            score = -rs + ret_2b
            rows.append((score, symbol))
        rows.sort(reverse=True)
        shorts = [symbol for _, symbol in rows[: spec.top_n]]
        return {symbol: -1.0 / len(shorts) for symbol in shorts} if shorts else {}

    def _dist_weak_basket_short(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
        """Equal-weight short basket of the weakest names (full 100% gross).

        Weakness = 7-day RS vs BTC plus the 18-bar return.
        """
        btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
        if len(btc_hist) < 43:
            return {}
        btc_ret_7d = self._return_from_hist(btc_hist, 42)
        rows: list[tuple[float, str]] = []
        for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
            hist = self._price_hist(symbol, timestamp, 48)
            if len(hist) < 43:
                continue
            ret_7d = self._return_from_hist(hist, 42)
            ret_3d = self._return_from_hist(hist, 18)
            rs = ret_7d - btc_ret_7d
            rows.append((rs + ret_3d, symbol))
        rows.sort()  # ascending: weakest first
        shorts = [symbol for _, symbol in rows[: spec.top_n]]
        return {symbol: -1.0 / len(shorts) for symbol in shorts} if shorts else {}

    def _dist_relative_weakness_spread(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
        """RS spread for distribution drift: long strongest, short weakest.

        Score = 7-day RS vs BTC plus 25% of the 18-bar return; needs at
        least 2 * top_n candidates to fill both sides.
        """
        btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
        if len(btc_hist) < 43:
            return {}
        btc_ret_7d = self._return_from_hist(btc_hist, 42)
        rows: list[tuple[float, str]] = []
        for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
            hist = self._price_hist(symbol, timestamp, 48)
            if len(hist) < 43:
                continue
            ret_7d = self._return_from_hist(hist, 42)
            ret_3d = self._return_from_hist(hist, 18)
            rows.append((ret_7d - btc_ret_7d + ret_3d * 0.25, symbol))
        if len(rows) < spec.top_n * 2:
            return {}
        rows.sort(reverse=True)
        longs = [symbol for _, symbol in rows[: spec.top_n]]
        shorts = [symbol for _, symbol in rows[-spec.top_n :]]
        return self._long_short_weights(longs, shorts)

    def _dist_btc_rally_short(self, timestamp: pd.Timestamp) -> dict[str, float]:
        """100% short BTC after a 2-bar bounce while price sits below EMA20."""
        hist = self._price_hist(BTC_SYMBOL, timestamp, 36)
        if len(hist) < 21:
            return {}
        ret_2b = self._return_from_hist(hist, 2)
        ema20 = hist["close"].ewm(span=20, adjust=False).mean().iloc[-1]
        close = float(hist["close"].iloc[-1])
        if ret_2b <= 0.0 or close >= float(ema20):
            return {}
        return {BTC_SYMBOL: -1.0}

    def _dist_btc_rally_short_strict(self, timestamp: pd.Timestamp) -> dict[str, float]:
        """Stricter BTC rally short.

        Requires a 2-bar bounce of at least +3.5%, a 7-day return still at
        or below -2%, and price below BOTH EMA20 and EMA50.
        """
        hist = self._price_hist(BTC_SYMBOL, timestamp, 72)
        if len(hist) < 43:
            return {}
        ret_2b = self._return_from_hist(hist, 2)
        ret_7d = self._return_from_hist(hist, 42)
        ema20 = hist["close"].ewm(span=20, adjust=False).mean().iloc[-1]
        ema50 = hist["close"].ewm(span=50, adjust=False).mean().iloc[-1]
        close = float(hist["close"].iloc[-1])
        if ret_2b < 0.035 or ret_7d > -0.02:
            return {}
        if close >= float(ema20) or close >= float(ema50):
            return {}
        return {BTC_SYMBOL: -1.0}

    def _dist_weak_rally_spread(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
        """Long the strongest RS names vs short weak names that are bouncing.

        Shorts are restricted to symbols with a positive 2-bar return and
        deduplicated against the long side.
        """
        btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
        if len(btc_hist) < 43:
            return {}
        btc_ret_7d = self._return_from_hist(btc_hist, 42)
        strong_rows: list[tuple[float, str]] = []
        weak_rows: list[tuple[float, str]] = []
        for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
            hist = self._price_hist(symbol, timestamp, 48)
            if len(hist) < 43:
                continue
            ret_7d = self._return_from_hist(hist, 42)
            ret_2b = self._return_from_hist(hist, 2)
            rs = ret_7d - btc_ret_7d
            strong_rows.append((rs - abs(ret_2b) * 0.1, symbol))
            if ret_2b > 0:
                weak_rows.append((-rs + ret_2b, symbol))
        strong_rows.sort(reverse=True)
        weak_rows.sort(reverse=True)
        longs = [symbol for _, symbol in strong_rows[: spec.top_n]]
        shorts = [symbol for _, symbol in weak_rows[: spec.top_n] if symbol not in longs]
        return self._long_short_weights(longs, shorts[: spec.top_n])

    def _liquid_symbols(self, timestamp: pd.Timestamp, min_avg_dollar_volume: float) -> list[str]:
        """Tradeable universe at ``timestamp`` (>=120 bars, volume floor), BTC excluded."""
        return [
            symbol
            for symbol in select_tradeable_universe(
                self.bundle.prices,
                timestamp,
                min_history_bars=120,
                min_avg_dollar_volume=min_avg_dollar_volume,
            )
            if symbol != BTC_SYMBOL
        ]

    def _price_hist(self, symbol: str, timestamp: pd.Timestamp, bars: int) -> pd.DataFrame:
        """Last ``bars`` price rows up to and including ``timestamp``, index reset."""
        return self.price_frames[symbol].loc[:timestamp].tail(bars).reset_index()

    def _return_from_hist(self, hist: pd.DataFrame, bars_back: int) -> float:
        """Close-to-close return over ``bars_back`` bars, clamped to history.

        Returns 0.0 for empty history, a non-positive effective lookback,
        or a non-positive previous close.
        """
        if hist.empty:
            return 0.0
        back = min(bars_back, len(hist) - 1)
        if back <= 0:
            return 0.0
        prev_close = float(hist["close"].iloc[-(back + 1)])
        close = float(hist["close"].iloc[-1])
        if prev_close <= 0:
            return 0.0
        return close / prev_close - 1.0

    def _latest_funding(self, symbol: str, timestamp: pd.Timestamp) -> float:
        """Most recent funding_rate at or before ``timestamp``; 0.0 when missing."""
        if symbol not in self.funding_frames:
            return 0.0
        hist = self.funding_frames[symbol].loc[:timestamp].tail(1)
        if hist.empty:
            return 0.0
        return float(hist["funding_rate"].iloc[-1])

    def _portfolio_return(self, weights: dict[str, float], prev_ts: pd.Timestamp, ts: pd.Timestamp) -> float:
        """Weighted one-bar portfolio return, routing synthetic legs by key prefix."""
        total = 0.0
        for symbol, weight in weights.items():
            if symbol.startswith("carry:"):
                total += weight * self._carry_return(symbol.split(":", 1)[1], prev_ts, ts)
            elif symbol.startswith("inverse_carry:"):
                total += weight * self._inverse_carry_return(symbol.split(":", 1)[1], prev_ts, ts)
            else:
                total += weight * self._price_return(symbol, prev_ts, ts)
        return total

    def _price_return(self, symbol: str, prev_ts: pd.Timestamp, ts: pd.Timestamp) -> float:
        """Close-to-close return between two bars; 0.0 if either bar is missing."""
        frame = self.price_frames[symbol]
        if prev_ts not in frame.index or ts not in frame.index:
            return 0.0
        prev_close = float(frame.loc[prev_ts, "close"])
        close = float(frame.loc[ts, "close"])
        if prev_close <= 0:
            return 0.0
        return close / prev_close - 1.0

    def _carry_return(self, symbol: str, prev_ts: pd.Timestamp, ts: pd.Timestamp) -> float:
        """One-bar carry-leg P&L: funding_rate at ts minus the basis change.

        NOTE(review): approximates a carry position's return — confirm the
        funding/basis sign conventions against the bundle's data source.
        """
        if symbol not in self.funding_frames:
            return 0.0
        frame = self.funding_frames[symbol]
        if prev_ts not in frame.index or ts not in frame.index:
            return 0.0
        funding_rate = float(frame.loc[ts, "funding_rate"])
        basis_change = float(frame.loc[ts, "basis"] - frame.loc[prev_ts, "basis"])
        return funding_rate - basis_change

    def _inverse_carry_return(self, symbol: str, prev_ts: pd.Timestamp, ts: pd.Timestamp) -> float:
        """Exact negation of ``_carry_return`` for the inverse-carry leg."""
        if symbol not in self.funding_frames:
            return 0.0
        frame = self.funding_frames[symbol]
        if prev_ts not in frame.index or ts not in frame.index:
            return 0.0
        funding_rate = float(frame.loc[ts, "funding_rate"])
        basis_change = float(frame.loc[ts, "basis"] - frame.loc[prev_ts, "basis"])
        return -funding_rate + basis_change

    @staticmethod
    def _turnover(current: dict[str, float], target: dict[str, float]) -> float:
        """L1 distance between the two weight maps (sum of absolute weight changes)."""
        symbols = set(current) | set(target)
        return sum(abs(target.get(symbol, 0.0) - current.get(symbol, 0.0)) for symbol in symbols)

    @staticmethod
    def _equal_weight(symbols: list[str], gross: float) -> dict[str, float]:
        """Split ``gross`` exposure equally across ``symbols``; {} when empty."""
        if not symbols:
            return {}
        weight = gross / len(symbols)
        return {symbol: weight for symbol in symbols}

    @staticmethod
    def _long_short_weights(longs: list[str], shorts: list[str]) -> dict[str, float]:
        """Build a 50%-gross-per-side book; overlapping names net, zeros dropped."""
        weights: dict[str, float] = {}
        if longs:
            long_weight = 0.50 / len(longs)
            for symbol in longs:
                weights[symbol] = weights.get(symbol, 0.0) + long_weight
        if shorts:
            short_weight = -0.50 / len(shorts)
            for symbol in shorts:
                weights[symbol] = weights.get(symbol, 0.0) + short_weight
        return {symbol: weight for symbol, weight in weights.items() if abs(weight) > 1e-9}
|
||||
|
||||
|
||||
def run_adverse_regime_search(
    *,
    cache_path: str | Path,
    eval_days: int = 1825,
    initial_capital: float = 1000.0,
) -> dict[str, object]:
    """Run every default engine spec over the trailing evaluation window.

    Loads the fixed-66 cache, simulates each spec with the research
    harness, prints a one-line summary per engine, and returns the result
    payloads both as a flat list and grouped best-first by target regime.
    """
    bundle, latest_bar, accepted_symbols = load_fixed66_cache(cache_path)
    harness = AdverseRegimeResearchHarness(bundle, latest_bar)
    eval_start = pd.Timestamp(latest_bar) - pd.Timedelta(days=eval_days)

    all_payloads: list[dict[str, object]] = []
    grouped: dict[str, list[dict[str, object]]] = {}
    for spec in default_engine_specs():
        outcome = harness.run_engine(spec, eval_start=eval_start, initial_capital=initial_capital)
        payload = outcome.to_payload()
        print(
            spec.target_regime,
            spec.name,
            f"ret={float(payload['total_return']) * 100:.2f}%",
            f"sharpe={float(payload['sharpe']):.2f}",
            f"mdd={float(payload['max_drawdown']) * 100:.2f}%",
            flush=True,
        )
        all_payloads.append(payload)
        grouped.setdefault(spec.target_regime, []).append(payload)

    def _rank(row: dict[str, object]) -> tuple[float, float, float]:
        # Best first: highest return, then sharpe, then shallowest drawdown.
        return (float(row["total_return"]), float(row["sharpe"]), -abs(float(row["max_drawdown"])))

    for regime_payloads in grouped.values():
        regime_payloads.sort(key=_rank, reverse=True)

    return {
        "analysis": "adverse_regime_engine_search",
        "latest_completed_bar": str(latest_bar),
        "accepted_symbols": accepted_symbols,
        "eval_days": eval_days,
        "initial_capital": initial_capital,
        "results": all_payloads,
        "by_regime": grouped,
    }
|
||||
326
research/hybrid_regime.py
Normal file
326
research/hybrid_regime.py
Normal file
@@ -0,0 +1,326 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
import json
|
||||
import pickle
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from strategy29.backtest.metrics import cagr, max_drawdown, sharpe_ratio
|
||||
from strategy29.backtest.window_analysis import slice_bundle
|
||||
from strategy29.common.models import AllocationDecision, BacktestResult, MarketDataBundle
|
||||
from strategy32.backtest.simulator import Strategy32Backtester, Strategy32MomentumCarryBacktester, build_engine_config
|
||||
from strategy32.config import PROFILE_V7_DEFAULT, build_strategy32_config
|
||||
from strategy32.research.adverse_regime import AdverseRegimeResearchHarness, default_engine_specs
|
||||
from strategy32.scripts.run_regime_filter_analysis import build_strategic_regime_frame
|
||||
|
||||
|
||||
# Named static filter presets: each entry is a set of strategy32 config overrides
# applied on top of the base profile when that preset is active.
STATIC_FILTERS: dict[str, dict[str, float]] = {
    # Baseline momentum filter (used for MOMENTUM_EXPANSION per STATIC_COMPONENT_MAP).
    "prev_balanced": {
        "universe_min_avg_dollar_volume": 50_000_000.0,
        "momentum_min_score": 0.60,
        "momentum_min_relative_strength": 0.00,
        "momentum_min_7d_return": 0.00,
        "max_pairwise_correlation": 0.70,
        "carry_min_expected_edge": 0.0,
    },
    # Defensive variant of prev_balanced: adds caps on recent return, volatility,
    # beta and funding to trim overextended names when conditions look fragile.
    "guarded_positive": {
        "universe_min_avg_dollar_volume": 50_000_000.0,
        "momentum_min_score": 0.60,
        "momentum_min_relative_strength": 0.00,
        "momentum_min_7d_return": 0.00,
        "momentum_max_7d_return": 0.35,
        "momentum_min_positive_bar_ratio": 0.52,
        "momentum_max_short_volatility": 0.075,
        "momentum_max_beta": 2.50,
        "momentum_max_latest_funding_rate": 0.00045,
        "max_pairwise_correlation": 0.70,
        "carry_min_expected_edge": 0.0,
    },
    # Looser filter for strong breakout conditions: larger-liquidity universe,
    # accepts mildly negative relative strength and higher correlation.
    "overheat_tolerant": {
        "universe_min_avg_dollar_volume": 100_000_000.0,
        "momentum_min_score": 0.60,
        "momentum_min_relative_strength": -0.02,
        "momentum_min_7d_return": 0.02,
        "max_pairwise_correlation": 0.78,
        "carry_min_expected_edge": 0.0,
    },
    # Defensive variant of overheat_tolerant for euphoric regimes that are
    # starting to look risky; tighter caps than guarded_positive.
    "guarded_euphoria": {
        "universe_min_avg_dollar_volume": 100_000_000.0,
        "momentum_min_score": 0.62,
        "momentum_min_relative_strength": -0.01,
        "momentum_min_7d_return": 0.02,
        "momentum_max_7d_return": 0.28,
        "momentum_min_positive_bar_ratio": 0.55,
        "momentum_max_short_volatility": 0.070,
        "momentum_max_beta": 2.20,
        "momentum_max_latest_funding_rate": 0.00035,
        "max_pairwise_correlation": 0.72,
        "carry_min_expected_edge": 0.0,
    },
}
|
||||
|
||||
# Sorted union of every override key used across STATIC_FILTERS; the backtester
# snapshots/restores exactly these config attributes around each rebalance.
STATIC_FILTER_ATTRS = tuple(sorted({key for overrides in STATIC_FILTERS.values() for key in overrides}))
# Positive strategic regimes -> static filter preset to trade them with.
STATIC_COMPONENT_MAP = {
    "MOMENTUM_EXPANSION": "prev_balanced",
    "EUPHORIC_BREAKOUT": "overheat_tolerant",
}
# Adverse strategic regimes -> adverse-engine name (see default_engine_specs()).
ADVERSE_COMPONENT_MAP = {
    "CAPITULATION_STRESS": "cap_btc_rebound",
    "CHOPPY_ROTATION": "chop_inverse_carry_strict",
    "DISTRIBUTION_DRIFT": "dist_inverse_carry_strict",
}
# Regimes counted as adverse when computing the rolling adverse share.
ADVERSE_REGIMES = {"CAPITULATION_STRESS", "CHOPPY_ROTATION", "DISTRIBUTION_DRIFT"}
|
||||
|
||||
|
||||
@dataclass(slots=True)
|
||||
class HybridWindowResult:
|
||||
label: str
|
||||
start: pd.Timestamp
|
||||
end: pd.Timestamp
|
||||
total_return: float
|
||||
annualized_return: float
|
||||
sharpe: float
|
||||
max_drawdown: float
|
||||
component_map: dict[str, str]
|
||||
|
||||
def to_payload(self) -> dict[str, object]:
|
||||
return {
|
||||
"start": str(self.start),
|
||||
"end": str(self.end),
|
||||
"total_return": self.total_return,
|
||||
"annualized_return": self.annualized_return,
|
||||
"sharpe": self.sharpe,
|
||||
"max_drawdown": self.max_drawdown,
|
||||
"component_map": self.component_map,
|
||||
}
|
||||
|
||||
|
||||
class StrategicRegimeFilterBacktester(Strategy32MomentumCarryBacktester):
    """Backtester that only trades while one strategic regime is active.

    Two behaviors are layered on top of the base momentum/carry backtester:
    - outside ``active_regime`` every decision is forced to 100% cash;
    - at each rebalance the config is temporarily patched with the STATIC_FILTERS
      preset chosen by ``filter_plan`` (falling back to ``default_filter_name``).
    """

    def __init__(
        self,
        strategy_config,
        data: MarketDataBundle,
        *,
        trade_start: pd.Timestamp,
        strategic_regime_map: dict[pd.Timestamp, str],
        active_regime: str,
        default_filter_name: str,
        filter_plan: dict[pd.Timestamp, str] | None = None,
    ):
        # Set our own state BEFORE super().__init__ — the base constructor may
        # already exercise the overridden hooks below.
        self._strategic_regime_map = strategic_regime_map
        self._active_regime = active_regime
        self._default_filter_name = default_filter_name
        self._filter_plan = filter_plan or {}
        super().__init__(strategy_config, data, trade_start=trade_start)

    def _govern_decision(
        self,
        decision: AllocationDecision,
        *,
        signal_timestamp: pd.Timestamp,
        current_equity: float,
        equity_history: list[float],
    ) -> AllocationDecision:
        """Apply base governance, then zero all budgets when out of regime."""
        governed = super()._govern_decision(
            decision,
            signal_timestamp=signal_timestamp,
            current_equity=current_equity,
            equity_history=equity_history,
        )
        # Unknown timestamps (not in the map) also count as "not active" here.
        if self._strategic_regime_map.get(signal_timestamp) != self._active_regime:
            return AllocationDecision(
                regime=governed.regime,
                momentum_budget_pct=0.0,
                carry_budget_pct=0.0,
                spread_budget_pct=0.0,
                cash_budget_pct=1.0,
            )
        return governed

    def _rebalance(
        self,
        portfolio,
        signal_timestamp: pd.Timestamp,
        execution_timestamp: pd.Timestamp,
        decision: AllocationDecision,
        rebalance_momentum: bool,
        rebalance_carry: bool,
        rebalance_spread: bool,
    ) -> list:
        """Rebalance with a temporarily-patched filter preset.

        STATIC_FILTER_ATTRS is the union of all preset keys, so snapshotting it
        guarantees every attribute a preset could set is restored afterwards
        (assumes the config object exposes all of those attributes).
        """
        originals = {attr: getattr(self.strategy_config, attr) for attr in STATIC_FILTER_ATTRS}
        try:
            filter_name = self._filter_plan.get(signal_timestamp, self._default_filter_name)
            for attr, value in STATIC_FILTERS[filter_name].items():
                setattr(self.strategy_config, attr, value)
            return super()._rebalance(
                portfolio,
                signal_timestamp,
                execution_timestamp,
                decision,
                rebalance_momentum,
                rebalance_carry,
                rebalance_spread,
            )
        finally:
            # Always restore, even when the base rebalance raises.
            for attr, value in originals.items():
                setattr(self.strategy_config, attr, value)
|
||||
|
||||
|
||||
def load_fixed66_bundle(path: str | Path) -> tuple[MarketDataBundle, pd.Timestamp]:
    """Load the cached fixed-66 pickle payload: (market bundle, latest completed bar).

    NOTE: pickle is only safe for trusted, locally-produced cache files.
    """
    with Path(path).open("rb") as handle:
        payload = pickle.load(handle)
    return payload["bundle"], pd.Timestamp(payload["latest_bar"])
|
||||
|
||||
|
||||
def _run_static_component_curve(
    *,
    sliced: MarketDataBundle,
    latest_bar: pd.Timestamp,
    eval_start: pd.Timestamp,
    regime_map: dict[pd.Timestamp, str],
    active_regime: str,
    filter_name: str,
    filter_plan: dict[pd.Timestamp, str] | None = None,
) -> pd.Series:
    """Backtest one regime-gated static filter; return its equity curve from eval_start on."""
    strategy_config = build_strategy32_config(PROFILE_V7_DEFAULT, **STATIC_FILTERS[filter_name])
    runner = StrategicRegimeFilterBacktester(
        strategy_config,
        sliced,
        trade_start=eval_start,
        strategic_regime_map=regime_map,
        active_regime=active_regime,
        default_filter_name=filter_name,
        filter_plan=filter_plan,
    )
    # Align starting capital with the live engine's configuration.
    runner.config.initial_capital = build_engine_config().initial_capital
    curve = runner.run().equity_curve
    return curve.loc[curve.index >= eval_start]
|
||||
|
||||
|
||||
def _run_adverse_component_curve(
    *,
    eval_start: pd.Timestamp,
    engine_name: str,
    harness: AdverseRegimeResearchHarness,
    regime_frame: pd.DataFrame,
) -> pd.Series:
    """Run the named adverse-regime engine; return its equity curve from eval_start on."""
    # StopIteration here means the engine name is not among the default specs.
    spec = next(candidate for candidate in default_engine_specs() if candidate.name == engine_name)
    engine_result = harness.run_engine(spec, eval_start=eval_start, initial_capital=1000.0, regime_frame=regime_frame)
    curve = engine_result.equity_curve
    return curve.loc[curve.index >= eval_start]
|
||||
|
||||
|
||||
def _curve_returns(curve: pd.Series) -> pd.Series:
|
||||
return curve.pct_change().fillna(0.0)
|
||||
|
||||
|
||||
def _annualized_return(total_return: float, days: int) -> float:
|
||||
if days <= 0:
|
||||
return 0.0
|
||||
return (1.0 + total_return) ** (365.0 / days) - 1.0
|
||||
|
||||
|
||||
def _build_positive_filter_plan(regime_frame: pd.DataFrame, active_regime: str) -> dict[pd.Timestamp, str]:
    """Map each bar timestamp to a guarded or relaxed filter preset.

    A bar is "guarded" when the rolling 18-bar adverse-regime share is elevated
    or the relevant persistence metric is weak. Regimes other than the two
    positive ones yield an empty plan.
    """
    ordered = regime_frame.sort_values("timestamp").copy()
    ordered["is_adverse"] = ordered["strategic_regime"].isin(ADVERSE_REGIMES).astype(float)
    ordered["recent_adverse_share"] = ordered["is_adverse"].rolling(18, min_periods=1).mean()
    plan: dict[pd.Timestamp, str] = {}
    if active_regime == "MOMENTUM_EXPANSION":
        for row in ordered.itertuples(index=False):
            needs_guard = float(row.recent_adverse_share) >= 0.40 or float(row.breadth_persist) < 0.58
            plan[pd.Timestamp(row.timestamp)] = "guarded_positive" if needs_guard else "prev_balanced"
    elif active_regime == "EUPHORIC_BREAKOUT":
        for row in ordered.itertuples(index=False):
            needs_guard = float(row.recent_adverse_share) >= 0.25 or float(row.funding_persist) < 0.72
            plan[pd.Timestamp(row.timestamp)] = "guarded_euphoria" if needs_guard else "overheat_tolerant"
    return plan
|
||||
|
||||
|
||||
def run_hybrid_backtest(
    *,
    cache_path: str | Path = "/tmp/strategy32_fixed66_bundle.pkl",
    windows: tuple[tuple[int, str], ...] = ((365, "1y"), (730, "2y"), (1095, "3y"), (1460, "4y"), (1825, "5y")),
) -> dict[str, object]:
    """Backtest the hybrid regime router over several trailing windows.

    For each window: build per-regime component equity curves (static filters
    for positive regimes, adverse engines for adverse ones), then stitch a
    single equity curve by routing each bar's return to the component of the
    regime active at the previous (signal) bar. Results are keyed by window
    label in the returned payload.
    """
    bundle, latest_bar = load_fixed66_bundle(cache_path)
    payload: dict[str, object] = {
        "analysis": "fixed66_hybrid_regime_backtest",
        "latest_completed_bar": str(latest_bar),
        "static_component_map": STATIC_COMPONENT_MAP,
        "adverse_component_map": ADVERSE_COMPONENT_MAP,
        "results": {},
    }

    for days, label in windows:
        eval_start = latest_bar - pd.Timedelta(days=days)
        # Extra 90-day warmup so indicators are seeded before eval_start.
        raw_start = eval_start - pd.Timedelta(days=90)
        sliced = slice_bundle(bundle, raw_start, latest_bar)
        regime_frame = build_strategic_regime_frame(sliced, eval_start, latest_bar)
        regime_map = dict(zip(pd.to_datetime(regime_frame["timestamp"]), regime_frame["strategic_regime"]))
        harness = AdverseRegimeResearchHarness(sliced, latest_bar)

        # One equity curve per strategic regime (positive filters + adverse engines).
        component_curves: dict[str, pd.Series] = {}
        for regime_name, filter_name in STATIC_COMPONENT_MAP.items():
            filter_plan = _build_positive_filter_plan(regime_frame, regime_name)
            component_curves[regime_name] = _run_static_component_curve(
                sliced=sliced,
                latest_bar=latest_bar,
                eval_start=eval_start,
                regime_map=regime_map,
                active_regime=regime_name,
                filter_name=filter_name,
                filter_plan=filter_plan,
            )
        for regime_name, engine_name in ADVERSE_COMPONENT_MAP.items():
            component_curves[regime_name] = _run_adverse_component_curve(
                eval_start=eval_start,
                engine_name=engine_name,
                harness=harness,
                regime_frame=regime_frame,
            )

        return_frames = {name: _curve_returns(curve) for name, curve in component_curves.items()}
        # BTC bar timestamps define the master clock for the stitched curve.
        timestamps = sorted(sliced.prices["BTC"]["timestamp"].loc[sliced.prices["BTC"]["timestamp"] >= eval_start].tolist())
        equity = 1000.0
        equity_idx = [timestamps[0]]
        equity_values = [equity]

        for i in range(1, len(timestamps)):
            # Signal at bar i-1 selects the component; its return realizes at bar i.
            signal_ts = timestamps[i - 1]
            execution_ts = timestamps[i]
            regime_name = regime_map.get(signal_ts, "")
            # Unknown regime or missing bar -> flat (0.0 return) for that bar.
            ret = float(return_frames.get(regime_name, pd.Series(dtype=float)).get(execution_ts, 0.0))
            # Floor at zero so equity can never go negative.
            equity *= max(0.0, 1.0 + ret)
            equity_idx.append(execution_ts)
            equity_values.append(equity)

        equity_curve = pd.Series(equity_values, index=pd.Index(equity_idx, name="timestamp"), dtype=float)
        total_return = float(equity_curve.iloc[-1] / equity_curve.iloc[0] - 1.0)
        payload["results"][label] = HybridWindowResult(
            label=label,
            start=pd.Timestamp(eval_start),
            end=pd.Timestamp(latest_bar),
            total_return=total_return,
            annualized_return=_annualized_return(total_return, days),
            sharpe=sharpe_ratio(equity_curve, 6),
            max_drawdown=max_drawdown(equity_curve),
            component_map={
                **{regime: filter_name for regime, filter_name in STATIC_COMPONENT_MAP.items()},
                **{regime: engine_name for regime, engine_name in ADVERSE_COMPONENT_MAP.items()},
            },
        ).to_payload()
        print(
            label,
            f"ret={total_return * 100:.2f}%",
            f"ann={payload['results'][label]['annualized_return'] * 100:.2f}%",
            f"sharpe={payload['results'][label]['sharpe']:.2f}",
            f"mdd={payload['results'][label]['max_drawdown'] * 100:.2f}%",
            flush=True,
        )
    return payload
|
||||
|
||||
|
||||
def write_hybrid_backtest(out_path: str | Path = "/tmp/strategy32_hybrid_regime_backtest.json") -> Path:
    """Run the hybrid backtest and persist its payload as pretty-printed JSON; return the path."""
    destination = Path(out_path)
    report = run_hybrid_backtest()
    destination.write_text(json.dumps(report, indent=2), encoding="utf-8")
    return destination
|
||||
848
research/soft_router.py
Normal file
848
research/soft_router.py
Normal file
@@ -0,0 +1,848 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import multiprocessing as mp
|
||||
from concurrent.futures import ProcessPoolExecutor, as_completed
|
||||
from dataclasses import asdict, dataclass
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from strategy29.backtest.metrics import max_drawdown, sharpe_ratio
|
||||
from strategy29.backtest.window_analysis import slice_bundle
|
||||
from strategy32.research.adverse_regime import AdverseRegimeResearchHarness, default_engine_specs
|
||||
from strategy32.research.hybrid_regime import (
|
||||
STATIC_FILTERS,
|
||||
_curve_returns,
|
||||
_run_adverse_component_curve,
|
||||
load_fixed66_bundle,
|
||||
)
|
||||
from strategy32.backtest.simulator import Strategy32Backtester
|
||||
from strategy32.config import PROFILE_V7_DEFAULT, build_strategy32_config
|
||||
from strategy32.scripts.run_regime_filter_analysis import STRATEGIC_REGIME_PROFILES, build_strategic_regime_frame
|
||||
|
||||
|
||||
@dataclass(frozen=True, slots=True)
|
||||
class SoftRouterCandidate:
|
||||
regime_profile: str
|
||||
core_filter: str
|
||||
cap_engine: str
|
||||
chop_engine: str
|
||||
dist_engine: str
|
||||
core_floor: float
|
||||
cap_max_weight: float
|
||||
chop_max_weight: float
|
||||
dist_max_weight: float
|
||||
chop_blend_floor: float
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return (
|
||||
f"{self.regime_profile}"
|
||||
f"|core:{self.core_filter}"
|
||||
f"|cap:{self.cap_engine}"
|
||||
f"|chop:{self.chop_engine}"
|
||||
f"|dist:{self.dist_engine}"
|
||||
f"|floor:{self.core_floor:.2f}"
|
||||
f"|capw:{self.cap_max_weight:.2f}"
|
||||
f"|chopw:{self.chop_max_weight:.2f}"
|
||||
f"|distw:{self.dist_max_weight:.2f}"
|
||||
f"|chopf:{self.chop_blend_floor:.2f}"
|
||||
)
|
||||
|
||||
|
||||
@dataclass(frozen=True, slots=True)
|
||||
class CashOverlayCandidate:
|
||||
regime_profile: str
|
||||
core_filter: str
|
||||
cap_engine: str
|
||||
chop_engine: str
|
||||
dist_engine: str
|
||||
cap_cash_weight: float
|
||||
chop_cash_weight: float
|
||||
dist_cash_weight: float
|
||||
cap_threshold: float
|
||||
chop_threshold: float
|
||||
dist_threshold: float
|
||||
core_block_threshold: float
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return (
|
||||
f"{self.regime_profile}"
|
||||
f"|core:{self.core_filter}"
|
||||
f"|cap:{self.cap_engine}"
|
||||
f"|chop:{self.chop_engine}"
|
||||
f"|dist:{self.dist_engine}"
|
||||
f"|capcw:{self.cap_cash_weight:.2f}"
|
||||
f"|chopcw:{self.chop_cash_weight:.2f}"
|
||||
f"|distcw:{self.dist_cash_weight:.2f}"
|
||||
f"|capth:{self.cap_threshold:.2f}"
|
||||
f"|chopth:{self.chop_threshold:.2f}"
|
||||
f"|distth:{self.dist_threshold:.2f}"
|
||||
f"|block:{self.core_block_threshold:.2f}"
|
||||
)
|
||||
|
||||
|
||||
@dataclass(frozen=True, slots=True)
|
||||
class MacroScaleSpec:
|
||||
floor: float
|
||||
close_gap_start: float
|
||||
close_gap_full: float
|
||||
fast_gap_start: float
|
||||
fast_gap_full: float
|
||||
close_weight: float = 0.60
|
||||
fast_weeks: int = 10
|
||||
slow_weeks: int = 30
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return (
|
||||
f"floor:{self.floor:.2f}"
|
||||
f"|close:{self.close_gap_start:.3f}->{self.close_gap_full:.3f}"
|
||||
f"|fast:{self.fast_gap_start:.3f}->{self.fast_gap_full:.3f}"
|
||||
f"|w:{self.close_weight:.2f}"
|
||||
)
|
||||
|
||||
|
||||
# Trailing lookback windows evaluated for every candidate: (days, label).
WINDOWS = (
    (365, "1y"),
    (730, "2y"),
    (1095, "3y"),
    (1460, "4y"),
    (1825, "5y"),
)

# Calendar-year evaluation segments: (label, inclusive start, exclusive-ish end).
# 2021 starts mid-March — presumably the earliest bar available in the cache;
# TODO confirm against the bundle's history.
YEAR_PERIODS = (
    ("2021", pd.Timestamp("2021-03-16 04:00:00+00:00"), pd.Timestamp("2022-01-01 00:00:00+00:00")),
    ("2022", pd.Timestamp("2022-01-01 00:00:00+00:00"), pd.Timestamp("2023-01-01 00:00:00+00:00")),
    ("2023", pd.Timestamp("2023-01-01 00:00:00+00:00"), pd.Timestamp("2024-01-01 00:00:00+00:00")),
    ("2024", pd.Timestamp("2024-01-01 00:00:00+00:00"), pd.Timestamp("2025-01-01 00:00:00+00:00")),
    ("2025", pd.Timestamp("2025-01-01 00:00:00+00:00"), pd.Timestamp("2026-01-01 00:00:00+00:00")),
)

# Start of the current year-to-date segment (UTC).
YTD_START = pd.Timestamp("2026-01-01 00:00:00+00:00")
|
||||
|
||||
|
||||
def _clip01(value: float) -> float:
|
||||
return min(max(float(value), 0.0), 1.0)
|
||||
|
||||
|
||||
def _ramp(value: float, start: float, end: float) -> float:
|
||||
if end == start:
|
||||
return 1.0 if value >= end else 0.0
|
||||
if value <= start:
|
||||
return 0.0
|
||||
if value >= end:
|
||||
return 1.0
|
||||
return (value - start) / (end - start)
|
||||
|
||||
|
||||
def _inverse_ramp(value: float, start: float, end: float) -> float:
|
||||
if end == start:
|
||||
return 1.0 if value <= end else 0.0
|
||||
if value >= start:
|
||||
return 0.0
|
||||
if value <= end:
|
||||
return 1.0
|
||||
return (start - value) / (start - end)
|
||||
|
||||
|
||||
def build_regime_score_frame(
    bundle,
    eval_start: pd.Timestamp,
    eval_end: pd.Timestamp,
    *,
    profile_name: str,
) -> pd.DataFrame:
    """Augment the strategic regime frame with continuous [0, 1] regime scores.

    For every bar this derives soft scores — panic, euphoria, expansion,
    distribution, choppy, and a combined core score — from the hard
    thresholds of the named STRATEGIC_REGIME_PROFILES entry, using linear
    ramps around each threshold instead of binary cutoffs. The exact ramp
    widths are hand-tuned; treat the multipliers as calibrated constants.
    """
    profile = STRATEGIC_REGIME_PROFILES[profile_name]
    frame = build_strategic_regime_frame(bundle, eval_start, eval_end, profile=profile).copy()

    panic_scores: list[float] = []
    euphoria_scores: list[float] = []
    expansion_scores: list[float] = []
    distribution_scores: list[float] = []
    choppy_scores: list[float] = []
    core_scores: list[float] = []

    for row in frame.itertuples(index=False):
        # Pull per-bar inputs, substituting neutral fallbacks for NaNs.
        breadth = float(row.breadth)
        breadth_persist = float(row.breadth_persist) if pd.notna(row.breadth_persist) else breadth
        atr = float(row.atr_pct) if pd.notna(row.atr_pct) else 0.0
        bar_ret = float(row.bar_return) if pd.notna(row.bar_return) else 0.0
        daily_gap = float(row.daily_trend_gap) if pd.notna(row.daily_trend_gap) else 0.0
        intra_gap = float(row.intraday_trend_gap) if pd.notna(row.intraday_trend_gap) else 0.0
        avg_funding = float(row.mean_alt_funding)
        positive_ratio = float(row.positive_funding_ratio)
        funding_persist = float(row.funding_persist) if pd.notna(row.funding_persist) else positive_ratio
        btc_7d = float(row.btc_7d_return)

        # Panic: triggered by the WORST of high ATR, a sharp down bar, or the
        # combination (min) of collapsed breadth AND negative funding.
        panic_score = max(
            _ramp(atr, profile.panic_atr * 0.85, profile.panic_atr * 1.35),
            _ramp(-bar_ret, abs(profile.panic_bar_return) * 0.75, abs(profile.panic_bar_return) * 1.35),
            min(
                _ramp(profile.panic_breadth - breadth, 0.0, max(profile.panic_breadth, 0.15)),
                _ramp(profile.panic_funding - avg_funding, 0.0, abs(profile.panic_funding) + 0.00015),
            ),
        )

        # Euphoria: mean of trend-gap, breadth, and funding ramps; the last
        # component takes the stronger of funding level vs. BTC 7d return.
        euphoria_components = [
            _ramp(daily_gap, profile.euphoria_daily_gap * 0.75, profile.euphoria_daily_gap * 1.6),
            _ramp(intra_gap, profile.euphoria_intraday_gap * 0.6, profile.euphoria_intraday_gap * 1.8),
            _ramp(breadth, profile.euphoria_breadth - 0.05, min(profile.euphoria_breadth + 0.12, 0.95)),
            _ramp(breadth_persist, profile.euphoria_breadth_persist - 0.06, min(profile.euphoria_breadth_persist + 0.12, 0.95)),
            _ramp(positive_ratio, profile.euphoria_positive_ratio - 0.08, min(profile.euphoria_positive_ratio + 0.12, 0.98)),
            _ramp(funding_persist, profile.euphoria_funding_persist - 0.08, min(profile.euphoria_funding_persist + 0.12, 0.98)),
            max(
                _ramp(avg_funding, profile.euphoria_funding * 0.5, max(profile.euphoria_funding * 2.0, profile.euphoria_funding + 0.00008)),
                _ramp(btc_7d, profile.euphoria_btc_7d * 0.6, max(profile.euphoria_btc_7d * 1.8, profile.euphoria_btc_7d + 0.08)),
            ),
        ]
        euphoria_score = sum(euphoria_components) / len(euphoria_components)

        # Expansion: mean of trend/breadth/funding ramps plus an inverse ATR
        # ramp (calmer is better); discounted when euphoria dominates.
        expansion_components = [
            _ramp(daily_gap, max(profile.expansion_daily_gap - 0.02, -0.02), profile.expansion_daily_gap + 0.06),
            _ramp(intra_gap, profile.expansion_intraday_gap - 0.01, profile.expansion_intraday_gap + 0.05),
            _ramp(breadth, profile.expansion_breadth - 0.06, min(profile.expansion_breadth + 0.14, 0.92)),
            _ramp(breadth_persist, profile.expansion_breadth_persist - 0.06, min(profile.expansion_breadth_persist + 0.14, 0.92)),
            _inverse_ramp(atr, profile.expansion_atr * 1.10, max(profile.expansion_atr * 0.60, 0.015)),
            _ramp(avg_funding, profile.expansion_min_funding - 0.00005, profile.expansion_min_funding + 0.00015),
            _ramp(btc_7d, profile.expansion_btc_7d - 0.04, profile.expansion_btc_7d + 0.10),
        ]
        expansion_score = sum(expansion_components) / len(expansion_components)
        expansion_score *= 1.0 - 0.55 * euphoria_score

        # Distribution: failing trend gaps, weakening breadth, fading positive
        # funding, and outright negative funding; discounted during panic.
        distribution_components = [
            max(
                _ramp(profile.distribution_daily_gap - daily_gap, 0.0, abs(profile.distribution_daily_gap) + 0.05),
                _ramp(profile.distribution_intraday_gap - intra_gap, 0.0, abs(profile.distribution_intraday_gap) + 0.04),
            ),
            _ramp(profile.distribution_breadth - breadth, 0.0, max(profile.distribution_breadth, 0.18)),
            _ramp(profile.distribution_positive_ratio - positive_ratio, 0.0, max(profile.distribution_positive_ratio, 0.18)),
            _ramp(-avg_funding, 0.0, 0.00020),
        ]
        distribution_score = sum(distribution_components) / len(distribution_components)
        distribution_score *= 1.0 - 0.35 * panic_score

        # Choppy: no trend, breadth near 50%, funding near zero — then
        # discounted by whichever directional regime score is strongest.
        trendlessness = 1.0 - max(
            _clip01(abs(daily_gap) / max(profile.euphoria_daily_gap, 0.03)),
            _clip01(abs(intra_gap) / max(profile.euphoria_intraday_gap, 0.015)),
        )
        centered_breadth = 1.0 - min(abs(breadth - 0.5) / 0.30, 1.0)
        funding_neutral = 1.0 - min(abs(avg_funding) / 0.00012, 1.0)
        choppy_score = (trendlessness + centered_breadth + funding_neutral) / 3.0
        choppy_score *= 1.0 - max(euphoria_score, expansion_score, distribution_score, panic_score) * 0.65
        choppy_score = max(choppy_score, 0.0)

        # Core = strength of either positive regime (used by the soft router).
        core_score = max(expansion_score, euphoria_score)

        panic_scores.append(_clip01(panic_score))
        euphoria_scores.append(_clip01(euphoria_score))
        expansion_scores.append(_clip01(expansion_score))
        distribution_scores.append(_clip01(distribution_score))
        choppy_scores.append(_clip01(choppy_score))
        core_scores.append(_clip01(core_score))

    frame["panic_score"] = panic_scores
    frame["euphoria_score"] = euphoria_scores
    frame["expansion_score"] = expansion_scores
    frame["distribution_score"] = distribution_scores
    frame["choppy_score"] = choppy_scores
    frame["core_score"] = core_scores
    return frame
|
||||
|
||||
|
||||
def _annualized_return(total_return: float, days: int) -> float:
|
||||
if days <= 0:
|
||||
return 0.0
|
||||
return (1.0 + total_return) ** (365.0 / days) - 1.0
|
||||
|
||||
|
||||
def segment_metrics(curve: pd.Series, start: pd.Timestamp, end: pd.Timestamp) -> dict[str, float]:
    """Return/risk metrics for the slice of *curve* between *start* and *end* (inclusive).

    The segment is renormalized to a 1000.0 base before computing sharpe and
    drawdown so segments are comparable regardless of entry equity. Degenerate
    segments (fewer than two points, or a non-positive starting value) yield
    all-zero metrics instead of raising.
    """

    def _degenerate() -> dict[str, float]:
        # Shared zero-metrics payload for both degenerate branches
        # (previously duplicated as two identical dict literals).
        return {
            "start": str(start),
            "end": str(end),
            "total_return": 0.0,
            "annualized_return": 0.0,
            "sharpe": 0.0,
            "max_drawdown": 0.0,
        }

    segment = curve.loc[(curve.index >= start) & (curve.index <= end)].copy()
    if len(segment) < 2:
        return _degenerate()
    base = float(segment.iloc[0])
    if base <= 0:
        # Renormalizing by a non-positive base would flip or explode the curve.
        return _degenerate()
    normalized = segment / base * 1000.0
    total_return = float(normalized.iloc[-1] / normalized.iloc[0] - 1.0)
    days = max(int((end - start) / pd.Timedelta(days=1)), 1)
    return {
        "start": str(start),
        "end": str(end),
        "total_return": total_return,
        "annualized_return": _annualized_return(total_return, days),
        "sharpe": sharpe_ratio(normalized, 6),
        "max_drawdown": max_drawdown(normalized),
    }
|
||||
|
||||
|
||||
def score_candidate(window_results: dict[str, dict[str, float]], year_results: dict[str, dict[str, float]]) -> tuple[float, int, int]:
    """Score a candidate from its window and calendar-year metrics.

    Rewards annualized return (recent and long windows weighted most) and
    sharpe; penalizes losing years and drawdowns beyond -20%. Returns
    (score, negative_year_count, drawdown_violation_count).
    """
    yearly = [float(metrics["total_return"]) for metrics in year_results.values()]
    losing_years = sum(ret < 0 for ret in yearly)
    deep_drawdowns = sum(float(metrics["max_drawdown"]) < -0.20 for metrics in window_results.values())

    total = 0.0
    # Annualized-return contributions, heaviest on the 5y and 1y windows.
    for label, weight in (("5y", 4.5), ("1y", 2.0), ("2y", 1.4), ("4y", 1.0), ("3y", 0.6)):
        total += weight * float(window_results[label]["annualized_return"])
    total += 1.3 * float(window_results["5y"]["sharpe"])
    total += 0.6 * float(window_results["1y"]["sharpe"])
    total += 2.5 * min(yearly)
    total += 0.7 * sum(max(ret, 0.0) for ret in yearly)
    total -= 3.25 * losing_years
    total -= 0.9 * deep_drawdowns
    # Extra penalty proportional to how far each window's drawdown exceeds 20%.
    for label in ("1y", "2y", "3y", "4y", "5y"):
        total -= max(0.0, abs(float(window_results[label]["max_drawdown"])) - 0.20) * 5.0
    return total, losing_years, deep_drawdowns
|
||||
|
||||
|
||||
def load_component_bundle(cache_path: str | None = None) -> tuple[object, pd.Timestamp]:
    """Load the cached fixed-66 bundle, defaulting to the standard /tmp location."""
    if cache_path:
        target = cache_path
    else:
        target = "/tmp/strategy32_fixed66_bundle.pkl"
    return load_fixed66_bundle(target)
|
||||
|
||||
|
||||
def compose_soft_router_curve(
    *,
    timestamps: list[pd.Timestamp],
    score_frame: pd.DataFrame,
    core_returns: pd.Series,
    cap_returns: pd.Series,
    chop_returns: pd.Series,
    dist_returns: pd.Series,
    candidate: SoftRouterCandidate,
) -> tuple[pd.Series, pd.DataFrame]:
    """Blend the four component return streams bar-by-bar using soft regime scores.

    At each bar the previous bar's regime scores set the overlay weights
    (capitulation, chop, distribution); the core sleeve fills the remainder
    subject to its floor; whatever is left is cash. Returns the blended
    equity curve (base 1000.0) and a per-bar frame of weights/scores.
    """
    score_map = score_frame.set_index("timestamp")[
        ["core_score", "panic_score", "choppy_score", "distribution_score"]
    ].sort_index()

    equity = 1000.0
    idx = [timestamps[0]]
    vals = [equity]
    rows: list[dict[str, float | str]] = []
    for i in range(1, len(timestamps)):
        # Scores observed at bar i-1 route the return realized at bar i.
        signal_ts = pd.Timestamp(timestamps[i - 1])
        execution_ts = pd.Timestamp(timestamps[i])
        score_row = score_map.loc[signal_ts] if signal_ts in score_map.index else None
        if score_row is None:
            # No scores for this bar: all overlays off, core at its floor.
            core_score = panic_score = choppy_score = distribution_score = 0.0
        else:
            core_score = float(score_row["core_score"])
            panic_score = float(score_row["panic_score"])
            choppy_score = float(score_row["choppy_score"])
            distribution_score = float(score_row["distribution_score"])

        # Overlay sleeves scale with their scores; distribution and chop are
        # discounted while panic is elevated (capitulation takes priority).
        cap_weight = candidate.cap_max_weight * panic_score
        dist_weight = candidate.dist_max_weight * distribution_score * (1.0 - 0.60 * panic_score)
        chop_signal = max(choppy_score, candidate.chop_blend_floor * (1.0 - core_score))
        chop_weight = candidate.chop_max_weight * chop_signal * (1.0 - 0.45 * panic_score)

        # Cap the combined overlay at 90%, rescaling sleeves proportionally.
        overlay_weight = cap_weight + dist_weight + chop_weight
        if overlay_weight > 0.90:
            scale = 0.90 / overlay_weight
            cap_weight *= scale
            dist_weight *= scale
            chop_weight *= scale
            overlay_weight = 0.90

        # Core sleeve: floor plus score-driven upside, shrunk by the overlay.
        core_target = candidate.core_floor + (1.0 - candidate.core_floor) * core_score
        core_weight = max(0.0, core_target * (1.0 - overlay_weight))
        total_weight = core_weight + cap_weight + chop_weight + dist_weight
        if total_weight > 1.0:
            # Never exceed 100% gross allocation across all sleeves.
            scale = 1.0 / total_weight
            core_weight *= scale
            cap_weight *= scale
            chop_weight *= scale
            dist_weight *= scale

        # Missing bars in any component contribute 0.0 (flat) for that sleeve.
        bar_ret = (
            core_weight * float(core_returns.get(execution_ts, 0.0))
            + cap_weight * float(cap_returns.get(execution_ts, 0.0))
            + chop_weight * float(chop_returns.get(execution_ts, 0.0))
            + dist_weight * float(dist_returns.get(execution_ts, 0.0))
        )
        # Floor at zero so equity can never go negative.
        equity *= max(0.0, 1.0 + bar_ret)
        idx.append(execution_ts)
        vals.append(equity)
        rows.append(
            {
                "timestamp": execution_ts,
                "core_weight": core_weight,
                "cap_weight": cap_weight,
                "chop_weight": chop_weight,
                "dist_weight": dist_weight,
                "cash_weight": max(0.0, 1.0 - core_weight - cap_weight - chop_weight - dist_weight),
                "core_score": core_score,
                "panic_score": panic_score,
                "choppy_score": choppy_score,
                "distribution_score": distribution_score,
                "portfolio_return": bar_ret,
            }
        )
    curve = pd.Series(vals, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
    weights = pd.DataFrame(rows)
    return curve, weights
|
||||
|
||||
|
||||
def build_period_components(
    *,
    bundle,
    eval_start: pd.Timestamp,
    eval_end: pd.Timestamp,
    profile_name: str,
    core_filter: str,
    cap_engine: str,
    chop_engine: str,
    dist_engine: str,
) -> dict[str, object]:
    """Build the per-bar inputs the soft router needs for one evaluation period.

    Runs the core strategy plus the three adverse engines over the window and
    returns their per-bar return streams together with the score frame and the
    BTC bar timestamps that define the routing clock.
    """
    # 90 extra warmup days so indicators are seeded before eval_start.
    warmup_start = eval_start - pd.Timedelta(days=90)
    window_bundle = slice_bundle(bundle, warmup_start, eval_end)
    score_frame = build_regime_score_frame(window_bundle, eval_start, eval_end, profile_name=profile_name)
    regime_frame = score_frame.copy()
    harness = AdverseRegimeResearchHarness(window_bundle, eval_end)

    core_cfg = build_strategy32_config(PROFILE_V7_DEFAULT, **STATIC_FILTERS[core_filter])
    core_backtester = Strategy32Backtester(core_cfg, window_bundle, trade_start=eval_start)
    core_backtester.engine_config.initial_capital = 1000.0
    full_core_curve = core_backtester.run().equity_curve
    core_curve = full_core_curve.loc[full_core_curve.index >= eval_start]

    def _adverse(engine_name: str) -> pd.Series:
        # All three adverse curves share the same harness and regime frame.
        return _run_adverse_component_curve(
            eval_start=eval_start,
            engine_name=engine_name,
            harness=harness,
            regime_frame=regime_frame,
        )

    cap_curve = _adverse(cap_engine)
    chop_curve = _adverse(chop_engine)
    dist_curve = _adverse(dist_engine)

    btc_stamps = window_bundle.prices["BTC"]["timestamp"]
    timestamps = sorted(btc_stamps.loc[btc_stamps >= eval_start].tolist())

    return {
        "score_frame": score_frame,
        "timestamps": timestamps,
        "core_returns": _curve_returns(core_curve),
        "cap_returns": _curve_returns(cap_curve),
        "chop_returns": _curve_returns(chop_curve),
        "dist_returns": _curve_returns(dist_curve),
    }
|
||||
|
||||
|
||||
def build_cash_overlay_period_components(
    *,
    bundle,
    eval_start: pd.Timestamp,
    eval_end: pd.Timestamp,
    profile_name: str,
    core_filter: str,
    cap_engine: str,
    chop_engine: str,
    dist_engine: str,
    core_config_overrides: dict[str, object] | None = None,
    macro_scale_spec: MacroScaleSpec | None = None,
) -> dict[str, object]:
    """Build the component inputs consumed by ``compose_cash_overlay_curve``.

    Runs the core strategy32 backtest (with optional config overrides) and the
    three adverse-regime engines over [eval_start, eval_end], returning their
    per-bar returns plus the regime score frame, the core engine's cash
    exposure rows, the BTC bar timestamps, and an optional macro scale map.
    """
    # A macro scale spec needs ~1y of history for its weekly EMAs; otherwise
    # 90 days of warmup ahead of the evaluation window is sufficient.
    raw_start = eval_start - pd.Timedelta(days=365 if macro_scale_spec is not None else 90)
    sliced = slice_bundle(bundle, raw_start, eval_end)
    score_frame = build_regime_score_frame(sliced, eval_start, eval_end, profile_name=profile_name)
    regime_frame = score_frame.copy()
    harness = AdverseRegimeResearchHarness(sliced, eval_end)

    # Core engine: static filter config, optionally overridden per candidate.
    core_config = dict(STATIC_FILTERS[core_filter])
    core_config.update(core_config_overrides or {})
    cfg = build_strategy32_config(PROFILE_V7_DEFAULT, **core_config)
    backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
    backtester.engine_config.initial_capital = 1000.0
    core_result = backtester.run()
    core_curve = core_result.equity_curve.loc[lambda s: s.index >= eval_start]
    # Per-bar cash exposure telemetry from the core run, trimmed to the window.
    exposure_frame = pd.DataFrame(core_result.metadata.get("exposure_rows", []))
    if not exposure_frame.empty:
        exposure_frame = exposure_frame.loc[exposure_frame["timestamp"] >= eval_start].copy()

    cap_curve = _run_adverse_component_curve(
        eval_start=eval_start,
        engine_name=cap_engine,
        harness=harness,
        regime_frame=regime_frame,
    )
    chop_curve = _run_adverse_component_curve(
        eval_start=eval_start,
        engine_name=chop_engine,
        harness=harness,
        regime_frame=regime_frame,
    )
    dist_curve = _run_adverse_component_curve(
        eval_start=eval_start,
        engine_name=dist_engine,
        harness=harness,
        regime_frame=regime_frame,
    )
    timestamps = sorted(sliced.prices["BTC"]["timestamp"].loc[sliced.prices["BTC"]["timestamp"] >= eval_start].tolist())
    # Macro scale is keyed on signal bars, i.e. every bar except the final
    # execution bar — hence timestamps[:-1].
    macro_scale_map = _build_macro_scale_map(
        sliced,
        timestamps=timestamps[:-1],
        spec=macro_scale_spec,
    )
    return {
        "score_frame": score_frame,
        "timestamps": timestamps,
        "core_returns": _curve_returns(core_curve),
        "core_exposure_frame": exposure_frame,
        "cap_returns": _curve_returns(cap_curve),
        "chop_returns": _curve_returns(chop_curve),
        "dist_returns": _curve_returns(dist_curve),
        "macro_scale_map": macro_scale_map,
    }
|
||||
|
||||
|
||||
def _build_macro_scale_map(
    bundle,
    *,
    timestamps: list[pd.Timestamp],
    spec: MacroScaleSpec | None,
) -> pd.Series | None:
    """Map each signal timestamp to a macro exposure multiplier in [floor, 1].

    Blends two weekly BTC trend gaps (close vs slow EMA, fast EMA vs slow
    EMA) through ``_ramp``, rescales the blend into ``[spec.floor, 1.0]``,
    and forward-fills the weekly values onto *timestamps*. Returns ``None``
    when no spec is supplied or BTC price data is unavailable.
    """
    if spec is None or not timestamps:
        return None
    btc = bundle.prices.get("BTC")
    if btc is None or btc.empty:
        return None

    close_series = btc.set_index("timestamp")["close"].astype(float).sort_index()
    # Daily last, then weekly (Sunday close) last — two-step resample so
    # partial weeks resolve the same way as the original daily pass.
    weekly_closes = close_series.resample("1D").last().dropna().resample("W-SUN").last().dropna()
    if weekly_closes.empty:
        return None

    fast_ema = weekly_closes.ewm(span=spec.fast_weeks, adjust=False).mean()
    slow_ema = weekly_closes.ewm(span=spec.slow_weeks, adjust=False).mean()
    close_gap = weekly_closes / slow_ema - 1.0
    fast_gap = fast_ema / slow_ema - 1.0

    # Ramp each gap into [0, 1], then blend with the configured close weight.
    close_component = close_gap.apply(lambda g: _ramp(float(g), spec.close_gap_start, spec.close_gap_full))
    fast_component = fast_gap.apply(lambda g: _ramp(float(g), spec.fast_gap_start, spec.fast_gap_full))
    blend = spec.close_weight * close_component + (1.0 - spec.close_weight) * fast_component
    scale = spec.floor + (1.0 - spec.floor) * blend.clip(0.0, 1.0)

    # Forward-fill weekly scale onto bar timestamps; bars before the first
    # weekly observation default to full exposure (1.0).
    on_bars = scale.reindex(pd.DatetimeIndex(timestamps, name="timestamp"), method="ffill")
    return on_bars.fillna(1.0).clip(spec.floor, 1.0).astype(float)
|
||||
|
||||
|
||||
def compose_cash_overlay_curve(
    *,
    timestamps: list[pd.Timestamp],
    score_frame: pd.DataFrame,
    core_returns: pd.Series,
    core_exposure_frame: pd.DataFrame,
    cap_returns: pd.Series,
    chop_returns: pd.Series,
    dist_returns: pd.Series,
    candidate: CashOverlayCandidate,
    macro_scale_map: pd.Series | None = None,
) -> tuple[pd.Series, pd.DataFrame]:
    """Compose the cash-overlay equity curve from component return streams.

    Each bar, the core engine's idle cash is partially deployed into the
    capitulation / chop / distribution overlay engines in proportion to their
    regime scores above the candidate's thresholds. Signals are read at bar
    ``i - 1`` and applied to returns at bar ``i`` (no look-ahead).

    Returns:
        ``(equity_curve, weights_frame)`` — the curve starts at 1000.0 on the
        first timestamp; the weights frame holds one diagnostic row per
        executed bar.
    """
    # Robustness fix: an empty bar list previously raised IndexError on
    # timestamps[0]; return an empty curve/frame instead.
    if not timestamps:
        return (
            pd.Series(dtype=float, index=pd.DatetimeIndex([], name="timestamp")),
            pd.DataFrame(),
        )

    score_map = score_frame.set_index("timestamp")[
        ["core_score", "panic_score", "choppy_score", "distribution_score"]
    ].sort_index()
    if core_exposure_frame.empty:
        # No exposure telemetry: treat the core book as fully in cash.
        cash_map = pd.Series(1.0, index=pd.DatetimeIndex(timestamps[:-1], name="timestamp"))
    else:
        cash_map = core_exposure_frame.set_index("timestamp")["cash_pct"].sort_index()

    equity = 1000.0
    idx = [timestamps[0]]
    vals = [equity]
    rows: list[dict[str, float | str]] = []
    for i in range(1, len(timestamps)):
        signal_ts = pd.Timestamp(timestamps[i - 1])
        execution_ts = pd.Timestamp(timestamps[i])
        score_row = score_map.loc[signal_ts] if signal_ts in score_map.index else None
        if score_row is None:
            # Missing score bar: neutral scores disable every overlay signal.
            core_score = panic_score = choppy_score = distribution_score = 0.0
        else:
            core_score = float(score_row["core_score"])
            panic_score = float(score_row["panic_score"])
            choppy_score = float(score_row["choppy_score"])
            distribution_score = float(score_row["distribution_score"])

        # Macro trend filter scales core exposure; the freed fraction of the
        # book is treated as additional deployable cash.
        macro_scale = float(macro_scale_map.get(signal_ts, 1.0)) if macro_scale_map is not None else 1.0
        raw_cash_pct = float(cash_map.get(signal_ts, cash_map.iloc[-1] if not cash_map.empty else 1.0))
        cash_pct = raw_cash_pct + (1.0 - raw_cash_pct) * (1.0 - macro_scale)
        # Linear ramp of each overlay signal above its activation threshold;
        # the max(..., 1e-9) guards against a threshold of exactly 1.0.
        cap_signal = _clip01((panic_score - candidate.cap_threshold) / max(1.0 - candidate.cap_threshold, 1e-9))
        chop_signal = _clip01((choppy_score - candidate.chop_threshold) / max(1.0 - candidate.chop_threshold, 1e-9))
        dist_signal = _clip01((distribution_score - candidate.dist_threshold) / max(1.0 - candidate.dist_threshold, 1e-9))

        # A strong core regime suppresses the defensive chop/dist overlays.
        if core_score > candidate.core_block_threshold:
            chop_signal *= 0.25
            dist_signal *= 0.35

        cap_weight = cash_pct * candidate.cap_cash_weight * cap_signal
        chop_weight = cash_pct * candidate.chop_cash_weight * chop_signal
        dist_weight = cash_pct * candidate.dist_cash_weight * dist_signal
        overlay_total = cap_weight + chop_weight + dist_weight
        # Overlay deployment can never exceed the cash actually available.
        if overlay_total > cash_pct and overlay_total > 0:
            scale = cash_pct / overlay_total
            cap_weight *= scale
            chop_weight *= scale
            dist_weight *= scale
            overlay_total = cash_pct

        bar_ret = (
            float(core_returns.get(execution_ts, 0.0)) * macro_scale
            + cap_weight * float(cap_returns.get(execution_ts, 0.0))
            + chop_weight * float(chop_returns.get(execution_ts, 0.0))
            + dist_weight * float(dist_returns.get(execution_ts, 0.0))
        )
        equity *= max(0.0, 1.0 + bar_ret)  # equity is floored at zero
        idx.append(execution_ts)
        vals.append(equity)
        rows.append(
            {
                "timestamp": execution_ts,
                "raw_core_cash_pct": raw_cash_pct,
                "core_cash_pct": cash_pct,
                "macro_scale": macro_scale,
                "cap_weight": cap_weight,
                "chop_weight": chop_weight,
                "dist_weight": dist_weight,
                "overlay_total": overlay_total,
                "core_score": core_score,
                "panic_score": panic_score,
                "choppy_score": choppy_score,
                "distribution_score": distribution_score,
                "portfolio_return": bar_ret,
            }
        )
    curve = pd.Series(vals, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
    weights = pd.DataFrame(rows)
    return curve, weights
|
||||
|
||||
|
||||
def evaluate_candidate_exact(
    *,
    bundle,
    latest_bar: pd.Timestamp,
    candidate: SoftRouterCandidate,
    cache_path: str | None = None,
    max_workers: int = 6,
) -> dict[str, object]:
    """Score a soft-router candidate over independent evaluation periods.

    Fans each rolling window and calendar-year period out to a forked worker
    process, then aggregates the per-period metrics through
    ``score_candidate``.

    NOTE(review): *bundle* itself is not shipped to workers — they reload the
    pickled bundle at *cache_path*, so that cache must match *bundle*.
    """
    # One (kind, label, start, end) spec per evaluation period.
    period_specs: list[tuple[str, str, pd.Timestamp, pd.Timestamp]] = []
    for days, label in WINDOWS:
        period_specs.append(("window", label, latest_bar - pd.Timedelta(days=days), latest_bar))
    for label, start, end_exclusive in YEAR_PERIODS:
        # Clamp year ends to the latest bar; subtract 1s to make ends inclusive.
        period_specs.append(("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))))
    period_specs.append(("year", "2026_YTD", YTD_START, latest_bar))

    # Fork keeps worker startup cheap; workers read the bundle from disk.
    ctx = mp.get_context("fork")
    cache_path = cache_path or "/tmp/strategy32_fixed66_bundle.pkl"
    window_results: dict[str, dict[str, float]] = {}
    year_results: dict[str, dict[str, float]] = {}
    latest_weights: list[dict[str, object]] = []

    with ProcessPoolExecutor(max_workers=min(max_workers, len(period_specs)), mp_context=ctx) as executor:
        future_map = {
            executor.submit(
                _exact_period_worker,
                cache_path,
                asdict(candidate),
                kind,
                label,
                str(start),
                str(end),
            ): (kind, label)
            for kind, label, start, end in period_specs
        }
        for future in as_completed(future_map):
            kind, label, metrics, weight_tail = future.result()
            if kind == "window":
                window_results[label] = metrics
            else:
                year_results[label] = metrics
                # The YTD period's final weights describe the latest allocation.
                if label == "2026_YTD":
                    latest_weights = weight_tail

    score, negative_years, mdd_violations = score_candidate(
        {label: window_results[label] for _, label in WINDOWS},
        {k: year_results[k] for k, _, _ in YEAR_PERIODS},
    )
    return {
        "candidate": asdict(candidate),
        "name": candidate.name,
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "windows": {label: window_results[label] for _, label in WINDOWS},
        "years": year_results,
        "latest_weights": latest_weights,
        "validation": "exact_independent_periods_soft_router",
    }
|
||||
|
||||
|
||||
def evaluate_cash_overlay_exact(
    *,
    bundle,
    latest_bar: pd.Timestamp,
    candidate: CashOverlayCandidate,
    cache_path: str | None = None,
    max_workers: int = 6,
    core_config_overrides: dict[str, object] | None = None,
    macro_scale_spec: MacroScaleSpec | None = None,
) -> dict[str, object]:
    """Score a cash-overlay candidate over independent evaluation periods.

    Same fan-out scheme as ``evaluate_candidate_exact``, but each worker runs
    the cash-overlay composition (with optional core-config overrides and a
    macro scale spec, both serialized as plain dicts for pickling).

    NOTE(review): *bundle* itself is not shipped to workers — they reload the
    pickled bundle at *cache_path*, so that cache must match *bundle*.
    """
    # One (kind, label, start, end) spec per evaluation period.
    period_specs: list[tuple[str, str, pd.Timestamp, pd.Timestamp]] = []
    for days, label in WINDOWS:
        period_specs.append(("window", label, latest_bar - pd.Timedelta(days=days), latest_bar))
    for label, start, end_exclusive in YEAR_PERIODS:
        # Clamp year ends to the latest bar; subtract 1s to make ends inclusive.
        period_specs.append(("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))))
    period_specs.append(("year", "2026_YTD", YTD_START, latest_bar))

    # Fork keeps worker startup cheap; workers read the bundle from disk.
    ctx = mp.get_context("fork")
    cache_path = cache_path or "/tmp/strategy32_fixed66_bundle.pkl"
    window_results: dict[str, dict[str, float]] = {}
    year_results: dict[str, dict[str, float]] = {}
    latest_weights: list[dict[str, object]] = []

    with ProcessPoolExecutor(max_workers=min(max_workers, len(period_specs)), mp_context=ctx) as executor:
        future_map = {
            executor.submit(
                _exact_cash_overlay_period_worker,
                cache_path,
                asdict(candidate),
                core_config_overrides or {},
                asdict(macro_scale_spec) if macro_scale_spec is not None else None,
                kind,
                label,
                str(start),
                str(end),
            ): (kind, label)
            for kind, label, start, end in period_specs
        }
        for future in as_completed(future_map):
            kind, label, metrics, weight_tail = future.result()
            if kind == "window":
                window_results[label] = metrics
            else:
                year_results[label] = metrics
                # The YTD period's final weights describe the latest allocation.
                if label == "2026_YTD":
                    latest_weights = weight_tail

    score, negative_years, mdd_violations = score_candidate(
        {label: window_results[label] for _, label in WINDOWS},
        {k: year_results[k] for k, _, _ in YEAR_PERIODS},
    )
    return {
        "candidate": asdict(candidate),
        "name": candidate.name,
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "macro_scale_spec": asdict(macro_scale_spec) if macro_scale_spec is not None else None,
        "windows": {label: window_results[label] for _, label in WINDOWS},
        "years": year_results,
        "latest_weights": latest_weights,
        "validation": "exact_independent_periods_cash_overlay",
    }
|
||||
|
||||
|
||||
def _exact_period_worker(
    cache_path: str,
    candidate_payload: dict[str, object],
    kind: str,
    label: str,
    start_text: str,
    end_text: str,
) -> tuple[str, str, dict[str, float], list[dict[str, object]]]:
    """Worker process: evaluate one soft-router period and return its metrics.

    Rehydrates the candidate and bundle from picklable arguments, composes the
    soft-router curve for [start, end], and returns the segment metrics plus
    a serializable copy of the final weights row.
    """
    period_start = pd.Timestamp(start_text)
    period_end = pd.Timestamp(end_text)
    candidate = SoftRouterCandidate(**candidate_payload)
    bundle, _ = load_component_bundle(cache_path)

    components = build_period_components(
        bundle=bundle,
        eval_start=period_start,
        eval_end=period_end,
        profile_name=candidate.regime_profile,
        core_filter=candidate.core_filter,
        cap_engine=candidate.cap_engine,
        chop_engine=candidate.chop_engine,
        dist_engine=candidate.dist_engine,
    )
    curve, weights = compose_soft_router_curve(candidate=candidate, **components)

    tail = weights.tail(1).copy()
    if not tail.empty and "timestamp" in tail.columns:
        # Stringify timestamps so the row survives the trip back to the parent.
        tail["timestamp"] = tail["timestamp"].astype(str)
    metrics = segment_metrics(curve, period_start, period_end)
    return kind, label, metrics, tail.to_dict(orient="records")
|
||||
|
||||
|
||||
def _exact_cash_overlay_period_worker(
    cache_path: str,
    candidate_payload: dict[str, object],
    core_config_overrides_payload: dict[str, object],
    macro_scale_spec_payload: dict[str, object] | None,
    kind: str,
    label: str,
    start_text: str,
    end_text: str,
) -> tuple[str, str, dict[str, float], list[dict[str, object]]]:
    """Worker process: evaluate one cash-overlay period and return its metrics.

    Rehydrates the candidate, optional macro scale spec, and bundle from
    picklable arguments, composes the cash-overlay curve for [start, end],
    and returns the segment metrics plus a serializable final weights row.
    """
    period_start = pd.Timestamp(start_text)
    period_end = pd.Timestamp(end_text)
    candidate = CashOverlayCandidate(**candidate_payload)
    macro_scale_spec = MacroScaleSpec(**macro_scale_spec_payload) if macro_scale_spec_payload else None
    bundle, _ = load_component_bundle(cache_path)

    components = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=period_start,
        eval_end=period_end,
        profile_name=candidate.regime_profile,
        core_filter=candidate.core_filter,
        cap_engine=candidate.cap_engine,
        chop_engine=candidate.chop_engine,
        dist_engine=candidate.dist_engine,
        core_config_overrides=core_config_overrides_payload,
        macro_scale_spec=macro_scale_spec,
    )
    curve, weights = compose_cash_overlay_curve(candidate=candidate, **components)

    tail = weights.tail(1).copy()
    if not tail.empty and "timestamp" in tail.columns:
        # Stringify timestamps so the row survives the trip back to the parent.
        tail["timestamp"] = tail["timestamp"].astype(str)
    metrics = segment_metrics(curve, period_start, period_end)
    return kind, label, metrics, tail.to_dict(orient="records")
|
||||
|
||||
|
||||
def build_full_period_components(
    *,
    bundle,
    latest_bar: pd.Timestamp,
    profile_name: str,
    core_filter: str,
    cap_engine: str,
    chop_engine: str,
    dist_engine: str,
    lookback_days: int = 1825,
) -> dict[str, object]:
    """Build period components over the trailing lookback ending at *latest_bar*.

    Thin wrapper around ``build_period_components`` that anchors the
    evaluation window to end at *latest_bar*. ``lookback_days`` generalizes
    the previously hard-coded 1825-day (~5-year) span; the default preserves
    the original behavior for existing callers.
    """
    eval_start = latest_bar - pd.Timedelta(days=lookback_days)
    return build_period_components(
        bundle=bundle,
        eval_start=eval_start,
        eval_end=latest_bar,
        profile_name=profile_name,
        core_filter=core_filter,
        cap_engine=cap_engine,
        chop_engine=chop_engine,
        dist_engine=dist_engine,
    )
|
||||
Reference in New Issue
Block a user