Initial strategy32 research and live runtime
This commit is contained in:
48
.env.example
Normal file
48
.env.example
Normal file
@@ -0,0 +1,48 @@
|
||||
# Telegram
|
||||
GAMMA_TELEGRAM_TOKEN=
|
||||
GAMMA_TELEGRAM_CHAT_ID=
|
||||
GAMMA_TELEGRAM_MIN_LEVEL=INFO
|
||||
TELEGRAM_BOT_TOKEN=
|
||||
TELEGRAM_CHAT_ID=
|
||||
TELEGRAM_MIN_LEVEL=INFO
|
||||
|
||||
# Binance USD-M
|
||||
GAMMA_BOT_API_KEY=
|
||||
GAMMA_BOT_API_SECRET=
|
||||
BN_API_KEY=
|
||||
BN_API_SECRET=
|
||||
STRATEGY32_BINANCE_TESTNET=true
|
||||
STRATEGY32_INCLUDE_ACCOUNT_SNAPSHOT=true
|
||||
STRATEGY32_ENABLE_LIVE_ORDERS=true
|
||||
STRATEGY32_EXECUTION_LEVERAGE=5
|
||||
STRATEGY32_MIN_TARGET_NOTIONAL_USD=25
|
||||
STRATEGY32_MIN_REBALANCE_NOTIONAL_USD=10
|
||||
STRATEGY32_CLOSE_ORPHAN_POSITIONS=true
|
||||
|
||||
# Monitor runtime
|
||||
STRATEGY32_LOG_LEVEL=INFO
|
||||
STRATEGY32_TIMEFRAME=4h
|
||||
STRATEGY32_MACRO_FILTER_TIMEFRAME=1w
|
||||
STRATEGY32_MACRO_FILTER_FAST_WEEKS=10
|
||||
STRATEGY32_MACRO_FILTER_SLOW_WEEKS=30
|
||||
STRATEGY32_HARD_FILTER_TIMEFRAME=1d
|
||||
STRATEGY32_EXECUTION_REFINEMENT_TIMEFRAME=1h
|
||||
STRATEGY32_LOOKBACK_DAYS=365
|
||||
STRATEGY32_WARMUP_DAYS=90
|
||||
STRATEGY32_POLL_SECONDS=60
|
||||
STRATEGY32_LIVE_MIN_QUOTE_VOLUME_24H=100000000
|
||||
STRATEGY32_HARD_FILTER_MIN_HISTORY_BARS=120
|
||||
STRATEGY32_HARD_FILTER_LOOKBACK_BARS=30
|
||||
STRATEGY32_HARD_FILTER_MIN_AVG_DOLLAR_VOLUME=50000000
|
||||
STRATEGY32_EXECUTION_REFINEMENT_LOOKBACK_BARS=48
|
||||
STRATEGY32_EXECUTION_REFINEMENT_FAST_EMA=8
|
||||
STRATEGY32_EXECUTION_REFINEMENT_SLOW_EMA=21
|
||||
STRATEGY32_EXECUTION_REFINEMENT_SCALE_DOWN_GAP=0.008
|
||||
STRATEGY32_EXECUTION_REFINEMENT_MAX_CHASE_GAP=0.018
|
||||
STRATEGY32_EXECUTION_REFINEMENT_MAX_RECENT_RETURN=0.03
|
||||
STRATEGY32_EXECUTION_REFINEMENT_SCALE_DOWN_FACTOR=0.5
|
||||
STRATEGY32_ENTRY_ONLY_REFINEMENT=true
|
||||
STRATEGY32_MAX_SPECS=0
|
||||
STRATEGY32_PAPER_CAPITAL_USD=1000
|
||||
STRATEGY32_MAX_STALENESS_DAYS=3
|
||||
STRATEGY32_RUNTIME_DIR=runtime
|
||||
6
.gitignore
vendored
Normal file
6
.gitignore
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
.pytest_cache/
|
||||
.DS_Store
|
||||
.env
|
||||
runtime/
|
||||
20
Dockerfile
Normal file
20
Dockerfile
Normal file
@@ -0,0 +1,20 @@
|
||||
FROM python:3.12-slim
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
tzdata \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ENV PYTHONPATH=/app
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
RUN pip install --no-cache-dir pandas
|
||||
|
||||
COPY strategy29/ strategy29/
|
||||
COPY strategy32/ strategy32/
|
||||
|
||||
RUN mkdir -p /app/runtime
|
||||
|
||||
ENTRYPOINT ["python", "-m", "strategy32.scripts.run_live_monitor"]
|
||||
5
__init__.py
Normal file
5
__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
from __future__ import annotations
|
||||
|
||||
__all__ = ["__version__"]
|
||||
|
||||
__version__ = "0.1.0"
|
||||
1
backtest/__init__.py
Normal file
1
backtest/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
from __future__ import annotations
|
||||
1046
backtest/simulator.py
Normal file
1046
backtest/simulator.py
Normal file
File diff suppressed because it is too large
Load Diff
185
config.py
Normal file
185
config.py
Normal file
@@ -0,0 +1,185 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
from strategy29.common.models import Regime
|
||||
from strategy29.config import Strategy29Config
|
||||
|
||||
|
||||
DEFAULT_QUOTE_ASSETS = ("USDT", "USDC")
|
||||
DEFAULT_EXCLUDED_BASE_ASSETS = ("USDT", "USDC", "BUSD", "FDUSD", "TUSD", "USDP", "DAI", "USDS", "USDD", "EUR", "AEUR")
|
||||
|
||||
PROFILE_V5_BASELINE = "v5_baseline"
|
||||
PROFILE_V7_DEFAULT = "v7_default"
|
||||
|
||||
|
||||
@dataclass(slots=True)
class Strategy32Budgets:
    """Capital budget fractions per macro regime for the three engines.

    Each regime maps to a (momentum, carry, sideways) triple of portfolio
    fractions; regimes not listed get zero budget everywhere.
    """

    strong_up_momentum: float = 0.85
    strong_up_carry: float = 0.10
    strong_up_sideways: float = 0.00
    up_momentum: float = 0.25
    up_carry: float = 0.15
    up_sideways: float = 0.00
    sideways_momentum: float = 0.00
    sideways_carry: float = 0.20
    sideways_sideways: float = 0.10
    down_momentum: float = 0.00
    down_carry: float = 0.10
    down_sideways: float = 0.03

    def for_regime(self, regime: Regime) -> tuple[float, float, float]:
        """Return the (momentum, carry, sideways) budget triple for *regime*.

        Unknown regimes yield (0.0, 0.0, 0.0).
        """
        budgets_by_regime = {
            Regime.STRONG_UP: (self.strong_up_momentum, self.strong_up_carry, self.strong_up_sideways),
            Regime.UP: (self.up_momentum, self.up_carry, self.up_sideways),
            Regime.SIDEWAYS: (self.sideways_momentum, self.sideways_carry, self.sideways_sideways),
            Regime.DOWN: (self.down_momentum, self.down_carry, self.down_sideways),
        }
        return budgets_by_regime.get(regime, (0.0, 0.0, 0.0))
|
||||
|
||||
|
||||
@dataclass(slots=True)
class Strategy32Config:
    """Tunable parameters for the Strategy32 pipeline.

    The feature-toggle defaults below match PROFILE_V7_DEFAULT; use
    apply_strategy32_profile() / build_strategy32_config() to switch profiles.
    Ratio-style thresholds are fractions (0.08 == 8%).
    """

    # --- Universe / discovery ---
    symbols: list[str] = field(default_factory=list)
    auto_discover_symbols: bool = True
    quote_assets: tuple[str, ...] = DEFAULT_QUOTE_ASSETS
    excluded_base_assets: tuple[str, ...] = DEFAULT_EXCLUDED_BASE_ASSETS
    discovery_min_quote_volume_24h: float = 5_000_000.0
    timeframe: str = "4h"
    warmup_days: int = 90
    max_symbol_staleness_days: int = 3
    universe_size: int = 0
    universe_lookback_bars: int = 30
    universe_min_avg_dollar_volume: float = 5_000_000.0
    # --- Hard history/liquidity filter ---
    hard_filter_refresh_cadence: str = "4h"
    hard_filter_min_history_bars: int = 120
    hard_filter_lookback_bars: int = 30
    hard_filter_min_avg_dollar_volume: float = 50_000_000.0
    # --- Momentum engine gates ---
    momentum_min_history_bars: int = 120
    momentum_min_score: float = 0.55
    momentum_min_relative_strength: float = 0.0
    momentum_min_7d_return: float = 0.0
    momentum_max_7d_return: float = 1.0
    momentum_min_positive_bar_ratio: float = 0.0
    momentum_max_short_volatility: float = 1.0
    momentum_max_beta: float = 10.0
    momentum_max_latest_funding_rate: float = 1.0
    # --- Fallback selection when primary filters come up empty ---
    enable_liquidity_universe_fallback: bool = True
    universe_fallback_min_avg_dollar_volume: float = 2_500_000.0
    universe_fallback_top_n: int = 8
    enable_momentum_filter_fallback: bool = True
    momentum_fallback_min_score: float = 0.45
    momentum_fallback_min_relative_strength: float = -0.03
    momentum_fallback_min_7d_return: float = -0.02
    momentum_fallback_top_n: int = 3
    # --- Carry engine / portfolio risk sizing ---
    carry_min_expected_edge: float = 0.0
    position_vol_lookback_bars: int = 36
    correlation_lookback_bars: int = 36
    max_pairwise_correlation: float = 0.78
    target_annualized_vol: float = 0.55
    governor_vol_lookback_bars: int = 42
    # --- Drawdown governor (staged de-risking then stop) ---
    drawdown_window_days: int = 30
    drawdown_scale_1_trigger: float = 0.08
    drawdown_scale_1: float = 0.70
    drawdown_scale_2_trigger: float = 0.12
    drawdown_scale_2: float = 0.40
    drawdown_stop_trigger: float = 0.18
    vol_scale_floor: float = 0.35
    # --- BTC hedging ratios ---
    up_btc_hedge_ratio: float = 0.35
    sideways_btc_hedge_ratio: float = 0.65
    sideways_top_n: int = 1
    # --- Relaxed / deep-relaxed carry screens ---
    carry_relaxed_top_n: int = 128
    carry_relaxed_min_positive_ratio: float = 0.52
    carry_relaxed_min_mean_funding_rate: float = 0.000015
    carry_relaxed_max_basis_volatility: float = 0.0075
    carry_relaxed_roundtrip_cost_pct: float = 0.0022
    carry_relaxed_basis_risk_multiplier: float = 1.0
    carry_deep_relaxed_min_positive_ratio: float = 0.48
    carry_deep_relaxed_min_mean_funding_rate: float = 0.0
    carry_deep_relaxed_max_basis_volatility: float = 0.012
    carry_deep_relaxed_roundtrip_cost_pct: float = 0.0018
    carry_deep_relaxed_basis_risk_multiplier: float = 0.75
    enable_carry_score_fallback: bool = True
    carry_score_fallback_min_expected_edge: float = -0.0002
    carry_score_fallback_min_positive_ratio: float = 0.48
    carry_score_fallback_top_n: int = 2
    # --- Feature toggles (overridden per profile; see PROFILE_OVERRIDES) ---
    enable_sideways_engine: bool = False
    enable_strong_kill_switch: bool = True
    enable_daily_trend_filter: bool = True
    enable_expanded_hedge: bool = False
    enable_max_holding_exit: bool = False
    # --- Execution refinement (finer-timeframe entry adjustment) ---
    enable_execution_refinement: bool = True
    execution_refinement_timeframe: str = "1h"
    execution_refinement_lookback_bars: int = 48
    execution_refinement_fast_ema: int = 8
    execution_refinement_slow_ema: int = 21
    execution_refinement_scale_down_gap: float = 0.008
    execution_refinement_max_chase_gap: float = 0.018
    execution_refinement_max_recent_return: float = 0.03
    execution_refinement_scale_down_factor: float = 0.5
    # --- Strong-regime kill switch thresholds ---
    strong_kill_drawdown_window_days: int = 60
    strong_kill_scale_1_trigger: float = 0.05
    strong_kill_scale_1: float = 0.60
    strong_kill_scale_2_trigger: float = 0.08
    strong_kill_scale_2: float = 0.35
    strong_kill_stop_trigger: float = 0.10
    # --- Daily trend filter / expanded hedge / holding limits ---
    long_trend_fast_ema_days: int = 50
    long_trend_slow_ema_days: int = 200
    expanded_up_btc_hedge_ratio: float = 0.55
    expanded_sideways_btc_hedge_ratio: float = 0.85
    expanded_strong_up_btc_hedge_ratio: float = 0.10
    max_holding_bars: int = 42
    trend_fail_ema_span: int = 18
    min_hold_bars_for_trend_fail: int = 12
    # Per-regime engine budgets (momentum / carry / sideways fractions).
    budgets: Strategy32Budgets = field(default_factory=Strategy32Budgets)
|
||||
|
||||
|
||||
# Feature-toggle bundles keyed by profile name.  apply_strategy32_profile()
# applies one of these on top of the Strategy32Config defaults.
PROFILE_OVERRIDES: dict[str, dict[str, bool]] = {
    PROFILE_V5_BASELINE: {
        "enable_sideways_engine": True,
        "enable_strong_kill_switch": False,
        "enable_daily_trend_filter": False,
        "enable_expanded_hedge": False,
        "enable_max_holding_exit": False,
    },
    PROFILE_V7_DEFAULT: {
        "enable_sideways_engine": False,
        "enable_strong_kill_switch": True,
        "enable_daily_trend_filter": True,
        "enable_expanded_hedge": False,
        "enable_max_holding_exit": False,
    },
}
|
||||
|
||||
|
||||
def apply_strategy32_profile(config: Strategy32Config, profile: str) -> Strategy32Config:
    """Apply the feature toggles of *profile* onto *config* in place.

    Returns the same (mutated) config object for chaining.

    Raises:
        ValueError: when *profile* is not a key of PROFILE_OVERRIDES.
    """
    try:
        toggles = PROFILE_OVERRIDES[profile]
    except KeyError as exc:
        known = ", ".join(sorted(PROFILE_OVERRIDES))
        raise ValueError(f"Unsupported Strategy32 profile: {profile}. Supported: {known}") from exc
    for toggle_name, toggle_value in toggles.items():
        setattr(config, toggle_name, toggle_value)
    return config
|
||||
|
||||
|
||||
def build_strategy32_config(profile: str = PROFILE_V7_DEFAULT, **overrides: object) -> Strategy32Config:
    """Construct a Strategy32Config for *profile*, then apply keyword overrides.

    Overrides are applied last via setattr, so they win over profile toggles.
    """
    cfg = apply_strategy32_profile(Strategy32Config(), profile)
    for field_name, field_value in overrides.items():
        setattr(cfg, field_name, field_value)
    return cfg
|
||||
|
||||
|
||||
def build_engine_config() -> Strategy29Config:
    """Build the underlying strategy29 engine config used by strategy32.

    Widens the momentum/carry selection caps to 128 and tightens stop /
    trailing-stop / funding-overheat thresholds relative to the engine's
    own defaults (presumably so selection is decided downstream — confirm
    against Strategy29Config defaults).
    """
    config = Strategy29Config()
    config.momentum.top_n = 128
    config.momentum.max_positions = 128
    config.momentum.rebalance_bars = 24
    config.momentum.trailing_stop_pct = 0.08
    config.momentum.stop_loss_pct = 0.07
    config.momentum.overheat_funding_rate = 0.00025
    config.carry.top_n = 128
    return config
|
||||
185
data.py
Normal file
185
data.py
Normal file
@@ -0,0 +1,185 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from strategy29.common.models import MarketDataBundle
|
||||
from strategy29.data.basis import compute_basis_frame
|
||||
from strategy29.data.binance_history import BinancePairSpec, discover_usd_quote_pair_specs, fetch_funding_paginated, fetch_klines_paginated
|
||||
from strategy29.data.funding import align_funding_to_price_bars
|
||||
|
||||
|
||||
def resolve_strategy32_pair_specs(
    *,
    symbols: list[str],
    auto_discover_symbols: bool,
    quote_assets: tuple[str, ...],
    excluded_base_assets: tuple[str, ...],
    min_quote_volume_24h: float,
) -> list[BinancePairSpec]:
    """Resolve the pair specs to trade, either by discovery or explicit list.

    When *auto_discover_symbols* is True the full discovered universe is
    returned unfiltered by current volume.  Otherwise each entry of
    *symbols* is matched against discovery; unmatched (or low-volume)
    entries get a synthetic spec against the first quote asset with zero
    recorded volumes.  Returns [] when neither path applies.
    """
    # Discovery is run with a zero volume floor on purpose; see comment below.
    discovered = discover_usd_quote_pair_specs(
        quote_assets=quote_assets,
        excluded_base_assets=excluded_base_assets,
        min_quote_volume_24h=0.0,
    )
    by_base = {spec.base_symbol: spec for spec in discovered}
    preferred_quote = quote_assets[0]
    if auto_discover_symbols:
        # Point-in-time liquidity is enforced inside the backtest using historical bars.
        # Filtering discovery by current 24h quote volume leaks future information into past windows.
        return discovered
    if symbols:
        specs: list[BinancePairSpec] = []
        for symbol in symbols:
            base_symbol = symbol.upper()
            if base_symbol in by_base:
                spec = by_base[base_symbol]
                if min(spec.spot_quote_volume_24h, spec.perp_quote_volume_24h) >= min_quote_volume_24h:
                    specs.append(spec)
                    continue
            # NOTE(review): a listed symbol that is discovered but fails the
            # volume floor still gets a synthetic zero-volume spec here —
            # confirm this fallthrough is intended.
            specs.append(
                BinancePairSpec(
                    base_symbol=base_symbol,
                    quote_asset=preferred_quote,
                    spot_symbol=f"{base_symbol}{preferred_quote}",
                    perp_symbol=f"{base_symbol}{preferred_quote}",
                    spot_quote_volume_24h=0.0,
                    perp_quote_volume_24h=0.0,
                )
            )
        return specs
    return []
|
||||
|
||||
|
||||
def build_strategy32_market_bundle(
    *,
    symbols: list[str],
    auto_discover_symbols: bool,
    quote_assets: tuple[str, ...],
    excluded_base_assets: tuple[str, ...],
    min_quote_volume_24h: float,
    start: pd.Timestamp,
    end: pd.Timestamp,
    timeframe: str = "4h",
    max_staleness_days: int = 3,
) -> tuple[MarketDataBundle, pd.Timestamp, list[str], list[str], dict[str, str]]:
    """Resolve pair specs, then fetch and assemble the market data bundle.

    Convenience wrapper around resolve_strategy32_pair_specs() followed by
    build_strategy32_market_bundle_from_specs(); see those for details.
    """
    resolved_specs = resolve_strategy32_pair_specs(
        symbols=symbols,
        auto_discover_symbols=auto_discover_symbols,
        quote_assets=quote_assets,
        excluded_base_assets=excluded_base_assets,
        min_quote_volume_24h=min_quote_volume_24h,
    )
    return build_strategy32_market_bundle_from_specs(
        specs=resolved_specs,
        start=start,
        end=end,
        timeframe=timeframe,
        max_staleness_days=max_staleness_days,
    )
|
||||
|
||||
|
||||
def build_strategy32_price_frames_from_specs(
    *,
    specs: list[BinancePairSpec],
    start: pd.Timestamp,
    end: pd.Timestamp,
    timeframe: str = "4h",
    max_staleness_days: int = 3,
) -> tuple[dict[str, pd.DataFrame], pd.Timestamp, list[str], list[str], dict[str, str]]:
    """Fetch perp OHLCV frames for each spec over [start, end].

    Returns (prices, latest_completed_bar, accepted, rejected, quote_by_symbol):
      - prices: base symbol -> frame with timestamp/open/high/low/close/volume
      - latest_completed_bar: the *minimum* last-bar timestamp over accepted
        symbols, i.e. the newest bar present for every symbol
      - accepted / rejected: base symbols kept vs dropped
      - quote_by_symbol: quote asset used per accepted symbol

    A symbol is rejected when its fetch raises, yields no bars closed by
    *end*, or its last bar is older than *max_staleness_days* before *end*.

    Raises:
        ValueError: when no symbol produced any completed bars.
    """
    prices: dict[str, pd.DataFrame] = {}
    accepted: list[str] = []
    rejected: list[str] = []
    quote_by_symbol: dict[str, str] = {}
    latest_completed_bar: pd.Timestamp | None = None
    staleness_cutoff = end - pd.Timedelta(days=max_staleness_days)

    for spec in specs:
        symbol = spec.base_symbol
        try:
            perp = fetch_klines_paginated(
                symbol,
                timeframe=timeframe,
                start=start,
                end=end,
                market="perp",
                market_symbol=spec.perp_symbol,
                quote_asset=spec.quote_asset,
            )
        except Exception:
            # Best-effort fetch: any failure just drops the symbol.
            rejected.append(symbol)
            continue
        # Keep only bars fully closed by *end*.
        perp = perp.loc[perp["close_time"] <= end].reset_index(drop=True)
        if perp.empty:
            rejected.append(symbol)
            continue
        symbol_end = pd.Timestamp(perp["timestamp"].iloc[-1])
        if symbol_end < staleness_cutoff:
            # Last bar too old relative to *end*: symbol considered stale.
            rejected.append(symbol)
            continue
        prices[symbol] = perp[["timestamp", "open", "high", "low", "close", "volume"]].copy()
        # Track the earliest per-symbol last bar so downstream logic only
        # uses timestamps that exist for every accepted symbol.
        latest_completed_bar = symbol_end if latest_completed_bar is None else min(latest_completed_bar, symbol_end)
        accepted.append(symbol)
        quote_by_symbol[symbol] = spec.quote_asset

    if latest_completed_bar is None:
        raise ValueError("no completed bars fetched for strategy32 price frames")
    return prices, latest_completed_bar, accepted, rejected, quote_by_symbol
|
||||
|
||||
|
||||
def build_strategy32_market_bundle_from_specs(
    *,
    specs: list[BinancePairSpec],
    start: pd.Timestamp,
    end: pd.Timestamp,
    timeframe: str = "4h",
    max_staleness_days: int = 3,
) -> tuple[MarketDataBundle, pd.Timestamp, list[str], list[str], dict[str, str]]:
    """Assemble a MarketDataBundle (perp prices + funding/basis) for *specs*.

    Perp prices come from build_strategy32_price_frames_from_specs().  Spot
    bars and funding rates are then fetched best-effort per accepted symbol:
    a failed/empty spot fetch leaves the symbol without a funding frame, and
    a failed/empty funding fetch falls back to funding_rate=0.0 on the basis
    frame.  BTC is skipped for funding/basis (presumably because it is used
    as the hedge leg — confirm with the engine code).
    """
    prices, latest_completed_bar, accepted, rejected, quote_by_symbol = build_strategy32_price_frames_from_specs(
        specs=specs,
        start=start,
        end=end,
        timeframe=timeframe,
        max_staleness_days=max_staleness_days,
    )
    funding: dict[str, pd.DataFrame] = {}

    by_base_symbol = {spec.base_symbol: spec for spec in specs}
    for symbol in accepted:
        if symbol == "BTC":
            continue
        spec = by_base_symbol[symbol]
        try:
            spot = fetch_klines_paginated(
                symbol,
                timeframe=timeframe,
                start=start,
                end=end,
                market="spot",
                market_symbol=spec.spot_symbol,
                quote_asset=spec.quote_asset,
            )
        except Exception:
            # No spot data -> no basis/funding frame for this symbol.
            continue
        spot = spot.loc[spot["close_time"] <= end].reset_index(drop=True)
        if spot.empty:
            continue
        # Basis = relationship between spot close and perp close per bar.
        basis = compute_basis_frame(spot[["timestamp", "close"]], prices[symbol][["timestamp", "close"]])
        try:
            funding_rates = fetch_funding_paginated(
                symbol,
                start=start,
                end=end,
                market_symbol=spec.perp_symbol,
                quote_asset=spec.quote_asset,
            )
        except Exception:
            funding_rates = pd.DataFrame(columns=["timestamp", "funding_rate"])
        if funding_rates.empty:
            # Funding unavailable: keep the basis series with zero funding.
            funding[symbol] = basis.assign(funding_rate=0.0)[["timestamp", "funding_rate", "basis"]]
            continue
        # Outer-merge funding events with basis bars, forward-fill gaps, then
        # snap the series onto the perp price bar timestamps.
        funding[symbol] = align_funding_to_price_bars(
            funding_rates.merge(basis, on="timestamp", how="outer").sort_values("timestamp").ffill(),
            prices[symbol]["timestamp"],
        )[["timestamp", "funding_rate", "basis"]]

    return MarketDataBundle(prices=prices, funding=funding), latest_completed_bar, accepted, rejected, quote_by_symbol
|
||||
52
docker-compose.yml
Normal file
52
docker-compose.yml
Normal file
@@ -0,0 +1,52 @@
|
||||
services:
|
||||
strategy32-live-monitor:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: strategy32/Dockerfile
|
||||
container_name: strategy32-live-monitor
|
||||
restart: unless-stopped
|
||||
env_file:
|
||||
      - .env  # was a machine-specific absolute path into strategy11 (/Volumes/SSD/...); use the project-local .env
|
||||
environment:
|
||||
STRATEGY32_TIMEFRAME: ${STRATEGY32_TIMEFRAME:-4h}
|
||||
STRATEGY32_MACRO_FILTER_TIMEFRAME: ${STRATEGY32_MACRO_FILTER_TIMEFRAME:-1w}
|
||||
STRATEGY32_MACRO_FILTER_FAST_WEEKS: ${STRATEGY32_MACRO_FILTER_FAST_WEEKS:-10}
|
||||
STRATEGY32_MACRO_FILTER_SLOW_WEEKS: ${STRATEGY32_MACRO_FILTER_SLOW_WEEKS:-30}
|
||||
STRATEGY32_HARD_FILTER_TIMEFRAME: ${STRATEGY32_HARD_FILTER_TIMEFRAME:-1d}
|
||||
STRATEGY32_EXECUTION_REFINEMENT_TIMEFRAME: ${STRATEGY32_EXECUTION_REFINEMENT_TIMEFRAME:-1h}
|
||||
STRATEGY32_LOOKBACK_DAYS: ${STRATEGY32_LOOKBACK_DAYS:-365}
|
||||
STRATEGY32_WARMUP_DAYS: ${STRATEGY32_WARMUP_DAYS:-90}
|
||||
STRATEGY32_POLL_SECONDS: ${STRATEGY32_POLL_SECONDS:-60}
|
||||
STRATEGY32_LIVE_MIN_QUOTE_VOLUME_24H: ${STRATEGY32_LIVE_MIN_QUOTE_VOLUME_24H:-100000000}
|
||||
STRATEGY32_HARD_FILTER_MIN_HISTORY_BARS: ${STRATEGY32_HARD_FILTER_MIN_HISTORY_BARS:-120}
|
||||
STRATEGY32_HARD_FILTER_LOOKBACK_BARS: ${STRATEGY32_HARD_FILTER_LOOKBACK_BARS:-30}
|
||||
STRATEGY32_HARD_FILTER_MIN_AVG_DOLLAR_VOLUME: ${STRATEGY32_HARD_FILTER_MIN_AVG_DOLLAR_VOLUME:-50000000}
|
||||
STRATEGY32_EXECUTION_REFINEMENT_LOOKBACK_BARS: ${STRATEGY32_EXECUTION_REFINEMENT_LOOKBACK_BARS:-48}
|
||||
STRATEGY32_EXECUTION_REFINEMENT_FAST_EMA: ${STRATEGY32_EXECUTION_REFINEMENT_FAST_EMA:-8}
|
||||
STRATEGY32_EXECUTION_REFINEMENT_SLOW_EMA: ${STRATEGY32_EXECUTION_REFINEMENT_SLOW_EMA:-21}
|
||||
STRATEGY32_EXECUTION_REFINEMENT_SCALE_DOWN_GAP: ${STRATEGY32_EXECUTION_REFINEMENT_SCALE_DOWN_GAP:-0.008}
|
||||
STRATEGY32_EXECUTION_REFINEMENT_MAX_CHASE_GAP: ${STRATEGY32_EXECUTION_REFINEMENT_MAX_CHASE_GAP:-0.018}
|
||||
STRATEGY32_EXECUTION_REFINEMENT_MAX_RECENT_RETURN: ${STRATEGY32_EXECUTION_REFINEMENT_MAX_RECENT_RETURN:-0.03}
|
||||
STRATEGY32_EXECUTION_REFINEMENT_SCALE_DOWN_FACTOR: ${STRATEGY32_EXECUTION_REFINEMENT_SCALE_DOWN_FACTOR:-0.5}
|
||||
STRATEGY32_ENTRY_ONLY_REFINEMENT: ${STRATEGY32_ENTRY_ONLY_REFINEMENT:-true}
|
||||
STRATEGY32_MAX_SPECS: ${STRATEGY32_MAX_SPECS:-0}
|
||||
STRATEGY32_PAPER_CAPITAL_USD: ${STRATEGY32_PAPER_CAPITAL_USD:-1000}
|
||||
STRATEGY32_MAX_STALENESS_DAYS: ${STRATEGY32_MAX_STALENESS_DAYS:-3}
|
||||
STRATEGY32_INCLUDE_ACCOUNT_SNAPSHOT: ${STRATEGY32_INCLUDE_ACCOUNT_SNAPSHOT:-1}
|
||||
STRATEGY32_BINANCE_TESTNET: ${STRATEGY32_BINANCE_TESTNET:-true}
|
||||
STRATEGY32_ENABLE_LIVE_ORDERS: ${STRATEGY32_ENABLE_LIVE_ORDERS:-true}
|
||||
STRATEGY32_EXECUTION_LEVERAGE: ${STRATEGY32_EXECUTION_LEVERAGE:-5}
|
||||
STRATEGY32_MIN_TARGET_NOTIONAL_USD: ${STRATEGY32_MIN_TARGET_NOTIONAL_USD:-25}
|
||||
STRATEGY32_MIN_REBALANCE_NOTIONAL_USD: ${STRATEGY32_MIN_REBALANCE_NOTIONAL_USD:-10}
|
||||
STRATEGY32_CLOSE_ORPHAN_POSITIONS: ${STRATEGY32_CLOSE_ORPHAN_POSITIONS:-true}
|
||||
STRATEGY32_RUNTIME_DIR: /app/runtime
|
||||
STRATEGY32_LOG_LEVEL: ${STRATEGY32_LOG_LEVEL:-INFO}
|
||||
volumes:
|
||||
- ./runtime:/app/runtime
|
||||
command: ["--runtime-dir", "/app/runtime"]
|
||||
healthcheck:
|
||||
test: ["CMD", "python", "-c", "from pathlib import Path; raise SystemExit(0 if Path('/app/runtime/strategy32_live_latest.json').exists() else 1)"]
|
||||
interval: 60s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
2
live/__init__.py
Normal file
2
live/__init__.py
Normal file
@@ -0,0 +1,2 @@
|
||||
from __future__ import annotations
|
||||
|
||||
111
live/binance_account.py
Normal file
111
live/binance_account.py
Normal file
@@ -0,0 +1,111 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import hmac
|
||||
import json
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any
|
||||
from urllib.parse import urlencode
|
||||
from urllib.request import Request, urlopen
|
||||
|
||||
|
||||
REST_TESTNET = "https://testnet.binancefuture.com"
|
||||
REST_MAINNET = "https://fapi.binance.com"
|
||||
|
||||
|
||||
@dataclass(slots=True)
class BinanceUsdMAccountClient:
    """Minimal stdlib-only signed REST client for Binance USD-M futures.

    Read endpoints are memoised in a small per-instance TTL cache; the
    balance and position entries are invalidated after every order.
    """

    api_key: str
    api_secret: str
    # When True, requests target the futures testnet host.
    testnet: bool = False
    # cache key -> (loaded_at epoch seconds, payload); see _get_cached().
    _response_cache: dict[str, tuple[float, Any]] = field(default_factory=dict, init=False, repr=False)

    @property
    def base_url(self) -> str:
        """REST host selected by the testnet flag."""
        return REST_TESTNET if self.testnet else REST_MAINNET

    def _sign_params(self, params: dict[str, Any] | None = None) -> dict[str, Any]:
        """Attach the timestamp and HMAC-SHA256 signature signed endpoints require."""
        payload = dict(params or {})
        payload["timestamp"] = int(time.time() * 1000)
        query = urlencode(payload)
        signature = hmac.new(self.api_secret.encode("utf-8"), query.encode("utf-8"), hashlib.sha256).hexdigest()
        payload["signature"] = signature
        return payload

    def _get(self, path: str, params: dict[str, Any] | None = None, *, auth: bool = True) -> Any:
        """GET *path*; signs the query string when *auth* is True."""
        query_params = self._sign_params(params) if auth else dict(params or {})
        url = f"{self.base_url}{path}?{urlencode(query_params)}"
        request = Request(url, headers={"X-MBX-APIKEY": self.api_key})
        with urlopen(request, timeout=15) as response:
            return json.loads(response.read().decode("utf-8"))

    def _post(self, path: str, params: dict[str, Any] | None = None, *, auth: bool = True) -> Any:
        """POST *path* with a form-encoded body; signs the body when *auth* is True."""
        payload = self._sign_params(params) if auth else dict(params or {})
        encoded = urlencode(payload).encode("utf-8")
        url = f"{self.base_url}{path}"
        request = Request(url, data=encoded, method="POST", headers={"X-MBX-APIKEY": self.api_key})
        with urlopen(request, timeout=15) as response:
            return json.loads(response.read().decode("utf-8"))

    def _get_cached(self, key: str, ttl_seconds: float, loader) -> Any:
        """Return the cached payload for *key* if still fresh, else call *loader* and cache it."""
        now = time.time()
        cached = self._response_cache.get(key)
        if cached is not None:
            loaded_at, payload = cached
            if now - loaded_at <= ttl_seconds:
                return payload
        payload = loader()
        self._response_cache[key] = (now, payload)
        return payload

    def get_balance(self) -> list[dict[str, Any]]:
        """Futures account balance rows (cached for 10s)."""
        payload = self._get_cached("balance", 10.0, lambda: list(self._get("/fapi/v2/balance")))
        return list(payload)

    def get_position_risk(self) -> list[dict[str, Any]]:
        """Position-risk rows for all contracts (cached for 5s)."""
        payload = self._get_cached("position_risk", 5.0, lambda: list(self._get("/fapi/v2/positionRisk")))
        return list(payload)

    def get_exchange_info(self) -> dict[str, Any]:
        """Exchange trading rules and symbol filters (unauthenticated, cached 1h)."""
        payload = self._get_cached("exchange_info", 3600.0, lambda: dict(self._get("/fapi/v1/exchangeInfo", auth=False)))
        return dict(payload)

    def get_ticker_price(self, symbol: str | None = None) -> Any:
        """Latest price for *symbol*, or for all symbols when None (cached 5s per key)."""
        params = {"symbol": symbol} if symbol else None
        key = f"ticker_price:{symbol or '*'}"
        return self._get_cached(key, 5.0, lambda: self._get("/fapi/v1/ticker/price", params=params, auth=False))

    def set_leverage(self, symbol: str, leverage: int) -> dict[str, Any]:
        """Set leverage for *symbol*; values below 1 are clamped up to 1."""
        return dict(self._post("/fapi/v1/leverage", {"symbol": symbol, "leverage": max(1, int(leverage))}))

    def place_market_order(
        self,
        *,
        symbol: str,
        side: str,
        quantity: float,
        reduce_only: bool = False,
        client_order_id: str | None = None,
    ) -> dict[str, Any]:
        """Submit a MARKET order, then invalidate balance/position caches.

        The optional client order id is sanitised to alphanumerics plus
        "-_." and truncated to 36 chars.  NOTE(review): ids are not made
        unique per order — confirm Binance accepts id reuse once the prior
        order is filled.
        """
        params: dict[str, Any] = {
            "symbol": symbol,
            "side": side.upper(),
            "type": "MARKET",
            "quantity": self._format_decimal(quantity),
            "newOrderRespType": "RESULT",
        }
        if reduce_only:
            params["reduceOnly"] = "true"
        if client_order_id:
            clean = "".join(ch for ch in str(client_order_id) if ch.isalnum() or ch in "-_.")
            if clean:
                params["newClientOrderId"] = clean[:36]
        response = dict(self._post("/fapi/v1/order", params))
        # The order changes balances and positions: drop the stale cached copies.
        self._response_cache.pop("balance", None)
        self._response_cache.pop("position_risk", None)
        return response

    @staticmethod
    def _format_decimal(value: float) -> str:
        """Render a quantity with up to 12 decimals, stripping trailing zeros and the dot."""
        return ("%.12f" % float(value)).rstrip("0").rstrip(".")
|
||||
27
live/env.py
Normal file
27
live/env.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def load_dotenv(path: str | Path) -> None:
|
||||
target = Path(path)
|
||||
if not target.exists():
|
||||
return
|
||||
for raw_line in target.read_text(encoding="utf-8").splitlines():
|
||||
line = raw_line.strip()
|
||||
if not line or line.startswith("#") or "=" not in line:
|
||||
continue
|
||||
key, value = line.split("=", 1)
|
||||
key = key.strip()
|
||||
value = value.strip().strip("'\"")
|
||||
if key and key not in os.environ:
|
||||
os.environ[key] = value
|
||||
|
||||
|
||||
def env_bool(name: str, default: bool = False) -> bool:
    """Interpret env var *name* as a boolean, returning *default* when unset.

    Truthy spellings (case-insensitive, whitespace-trimmed):
    1, true, yes, y, on.  Anything else is False.
    """
    value = os.getenv(name)
    if value is None:
        return default
    return value.strip().lower() in ("1", "true", "yes", "y", "on")
|
||||
|
||||
275
live/executor.py
Normal file
275
live/executor.py
Normal file
@@ -0,0 +1,275 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any
|
||||
|
||||
from strategy32.live.binance_account import BinanceUsdMAccountClient
|
||||
|
||||
|
||||
KNOWN_QUOTES = ("USDT", "USDC")
|
||||
|
||||
|
||||
@dataclass(slots=True)
class SymbolRule:
    """Exchange trading constraints for one futures contract."""

    # Full contract symbol, e.g. base + quote concatenated.
    contract_symbol: str
    base_asset: str
    quote_asset: str
    # Quantity increment the exchange accepts.
    step_size: float
    # Minimum order quantity.
    min_qty: float
    # Minimum order notional in quote units.
    min_notional: float
|
||||
|
||||
|
||||
@dataclass(slots=True)
class LiveExecutionConfig:
    """Knobs controlling live order placement in LiveFuturesExecutor."""

    # Master switch: when False, reconcile() only reports targets.
    enabled: bool = False
    leverage: int = 1
    # Target notionals below this (or the exchange minimum) are zeroed out.
    min_target_notional_usd: float = 25.0
    # Rebalance deltas below this (or the exchange minimum) are skipped.
    min_rebalance_notional_usd: float = 10.0
    # Close positions that no longer appear in the target set.
    close_orphan_positions: bool = True
    # Apply refinement scaling only to entries, not to existing positions.
    entry_only_refinement: bool = True
|
||||
|
||||
|
||||
@dataclass(slots=True)
class ExecutionResult:
    """Outcome summary of one reconcile() pass."""

    # Timestamp string copied from the snapshot's generated_at field.
    executed_at: str
    # Whether live order placement was enabled for this pass.
    enabled: bool
    account_equity_usd: float
    # Base symbols present in the target set.
    target_symbols: list[str] = field(default_factory=list)
    # Order summary dicts for every order actually submitted.
    orders: list[dict[str, Any]] = field(default_factory=list)
    # Machine-readable skip reasons, e.g. "missing_price:XYZ".
    warnings: list[str] = field(default_factory=list)
|
||||
|
||||
|
||||
class LiveFuturesExecutor:
|
||||
    def __init__(self, client: BinanceUsdMAccountClient, config: LiveExecutionConfig) -> None:
        """Bind the REST client and execution config; caches start empty."""
        self.client = client
        self.config = config
        # Lazily-built map of base symbol -> SymbolRule (populated elsewhere in the class).
        self._rules: dict[str, SymbolRule] | None = None
        # Contract symbols whose leverage has already been set this process.
        self._applied_leverage: set[str] = set()
||||
|
||||
    def reconcile(self, snapshot: dict[str, Any]) -> ExecutionResult:
        """Drive account positions toward the target weights in *snapshot*.

        Targets come from snapshot["execution_targets"] when present,
        otherwise from tradeable "perp:" rows of snapshot["combined_targets"].
        With execution disabled, only the target symbol list is reported.

        Per symbol: weight -> notional (zeroed below the minimum), an
        optional entry-only clamp toward the unrefined desired weight,
        a reduce-only close when the position sign flips, then a delta
        market order when the rebalance notional is large enough.
        """
        result = ExecutionResult(
            executed_at=str(snapshot.get("generated_at", "")),
            enabled=self.config.enabled,
            account_equity_usd=self._account_equity_usd(),
        )
        quote_by_symbol = dict(snapshot.get("universe", {}).get("quote_by_symbol", {}))
        execution_targets = snapshot.get("execution_targets")
        if isinstance(execution_targets, list):
            target_rows = list(execution_targets)
        else:
            # Fallback: derive targets from tradeable perp rows.
            target_rows = [
                row
                for row in snapshot.get("combined_targets", [])
                if bool(row.get("tradeable")) and str(row.get("instrument", "")).startswith("perp:")
            ]
        # "perp:BTC" -> "BTC"
        result.target_symbols = [str(row["instrument"]).split(":", 1)[1] for row in target_rows]
        if not self.config.enabled:
            return result

        target_weights = {
            str(row["instrument"]).split(":", 1)[1]: float(row.get("weight", 0.0) or 0.0)
            for row in target_rows
        }
        # desired_weight = pre-refinement weight; falls back to weight.
        desired_weights = {
            str(row["instrument"]).split(":", 1)[1]: float(row.get("desired_weight", row.get("weight", 0.0)) or 0.0)
            for row in target_rows
        }
        current_positions = self._current_positions()
        # Include currently-open symbols only when orphan closing is enabled.
        all_symbols = sorted(set(target_weights) | set(current_positions if self.config.close_orphan_positions else target_weights))
        prices = self._prices_for_symbols(all_symbols, quote_by_symbol, current_positions)

        for base_symbol in all_symbols:
            quote_asset = quote_by_symbol.get(base_symbol, current_positions.get(base_symbol, {}).get("quote_asset", "USDT"))
            rule = self._symbol_rule(base_symbol, quote_asset)
            if rule is None:
                result.warnings.append(f"missing_symbol_rule:{base_symbol}")
                continue

            target_weight = float(target_weights.get(base_symbol, 0.0) or 0.0)
            price = float(prices.get(base_symbol, 0.0) or 0.0)
            if price <= 0:
                result.warnings.append(f"missing_price:{base_symbol}")
                continue

            target_notional = target_weight * result.account_equity_usd
            # Too small to be worth holding: flatten instead.
            if abs(target_notional) < max(self.config.min_target_notional_usd, rule.min_notional):
                target_notional = 0.0

            current_qty = float(current_positions.get(base_symbol, {}).get("qty", 0.0) or 0.0)
            current_contract = current_positions.get(base_symbol, {}).get("contract_symbol", rule.contract_symbol)
            if self.config.entry_only_refinement:
                # Refinement may only scale down *entries*; an existing
                # same-direction position is kept at its current size (up to
                # the unrefined desired notional) rather than trimmed.
                desired_weight = float(desired_weights.get(base_symbol, target_weight) or 0.0)
                desired_notional = desired_weight * result.account_equity_usd
                current_notional = current_qty * price
                if (
                    current_qty != 0.0
                    and desired_notional != 0.0
                    and math.copysign(1.0, current_notional) == math.copysign(1.0, desired_notional)
                    and abs(target_notional) < abs(desired_notional)
                ):
                    target_notional = math.copysign(
                        min(abs(current_notional), abs(desired_notional)),
                        desired_notional,
                    )

            target_qty = self._normalize_qty(rule, target_notional / price)

            # Direction flip: close the old position with a reduce-only order first.
            if current_qty != 0.0 and target_qty != 0.0 and math.copysign(1.0, current_qty) != math.copysign(1.0, target_qty):
                close_order = self._submit_delta(
                    contract_symbol=current_contract,
                    delta_qty=-current_qty,
                    price=price,
                    reduce_only=True,
                )
                if close_order is not None:
                    result.orders.append(close_order)
                current_qty = 0.0

            delta_qty = target_qty - current_qty
            delta_notional = abs(delta_qty) * price
            # Skip dust rebalances below the configured/exchange floor.
            if delta_notional < max(self.config.min_rebalance_notional_usd, rule.min_notional):
                continue

            order = self._submit_delta(
                contract_symbol=rule.contract_symbol,
                delta_qty=delta_qty,
                price=price,
                reduce_only=False,
            )
            if order is not None:
                result.orders.append(order)

        return result
|
||||
|
||||
def _submit_delta(
|
||||
self,
|
||||
*,
|
||||
contract_symbol: str,
|
||||
delta_qty: float,
|
||||
price: float,
|
||||
reduce_only: bool,
|
||||
) -> dict[str, Any] | None:
|
||||
qty = abs(float(delta_qty))
|
||||
if qty <= 0:
|
||||
return None
|
||||
side = "BUY" if delta_qty > 0 else "SELL"
|
||||
self._ensure_leverage(contract_symbol)
|
||||
response = self.client.place_market_order(
|
||||
symbol=contract_symbol,
|
||||
side=side,
|
||||
quantity=qty,
|
||||
reduce_only=reduce_only,
|
||||
client_order_id=f"s32-{contract_symbol.lower()}-{side.lower()}",
|
||||
)
|
||||
return {
|
||||
"symbol": contract_symbol,
|
||||
"side": side,
|
||||
"quantity": qty,
|
||||
"price_ref": price,
|
||||
"reduce_only": reduce_only,
|
||||
"response": response,
|
||||
}
|
||||
|
||||
def _ensure_leverage(self, contract_symbol: str) -> None:
|
||||
if contract_symbol in self._applied_leverage:
|
||||
return
|
||||
self.client.set_leverage(contract_symbol, self.config.leverage)
|
||||
self._applied_leverage.add(contract_symbol)
|
||||
|
||||
def _account_equity_usd(self) -> float:
    """Sum wallet balances over known quote assets into one USD-like figure."""
    balances = self.client.get_balance()
    return sum(
        float(row.get("balance", 0.0) or 0.0)
        for row in balances
        if str(row.get("asset", "")).upper() in KNOWN_QUOTES
    )
|
||||
|
||||
def _current_positions(self) -> dict[str, dict[str, Any]]:
|
||||
rows = self.client.get_position_risk()
|
||||
positions: dict[str, dict[str, Any]] = {}
|
||||
for row in rows:
|
||||
qty = float(row.get("positionAmt", 0.0) or 0.0)
|
||||
if abs(qty) <= 1e-12:
|
||||
continue
|
||||
contract_symbol = str(row.get("symbol", "")).upper()
|
||||
base_asset, quote_asset = self._split_contract_symbol(contract_symbol)
|
||||
positions[base_asset] = {
|
||||
"qty": qty,
|
||||
"quote_asset": quote_asset,
|
||||
"contract_symbol": contract_symbol,
|
||||
"mark_price": float(row.get("markPrice", 0.0) or 0.0),
|
||||
}
|
||||
return positions
|
||||
|
||||
def _prices_for_symbols(
|
||||
self,
|
||||
symbols: list[str],
|
||||
quote_by_symbol: dict[str, str],
|
||||
current_positions: dict[str, dict[str, Any]],
|
||||
) -> dict[str, float]:
|
||||
prices: dict[str, float] = {}
|
||||
for base_symbol in symbols:
|
||||
current = current_positions.get(base_symbol)
|
||||
if current is not None and float(current.get("mark_price", 0.0) or 0.0) > 0:
|
||||
prices[base_symbol] = float(current["mark_price"])
|
||||
continue
|
||||
quote_asset = quote_by_symbol.get(base_symbol, "USDT")
|
||||
contract_symbol = f"{base_symbol}{quote_asset}"
|
||||
ticker = self.client.get_ticker_price(contract_symbol)
|
||||
if isinstance(ticker, dict):
|
||||
prices[base_symbol] = float(ticker.get("price", 0.0) or 0.0)
|
||||
return prices
|
||||
|
||||
def _symbol_rule(self, base_symbol: str, quote_asset: str) -> SymbolRule | None:
|
||||
rules = self._load_rules()
|
||||
return rules.get(f"{base_symbol}{quote_asset}")
|
||||
|
||||
def _load_rules(self) -> dict[str, SymbolRule]:
    """Fetch per-contract trading rules from exchange info, caching the result."""
    if self._rules is not None:
        return self._rules
    exchange_info = self.client.get_exchange_info()
    parsed: dict[str, SymbolRule] = {}
    for row in exchange_info.get("symbols", []):
        contract_symbol = str(row.get("symbol", "")).upper()
        if not contract_symbol:
            continue
        step_size = 0.0
        min_qty = 0.0
        # 5.0 is kept as a conservative floor when the exchange reports none.
        min_notional = 5.0
        for flt in row.get("filters", []):
            flt_type = str(flt.get("filterType", ""))
            if flt_type in {"LOT_SIZE", "MARKET_LOT_SIZE"}:
                # Take the strictest of LOT_SIZE / MARKET_LOT_SIZE.
                step_size = max(step_size, float(flt.get("stepSize", 0.0) or 0.0))
                min_qty = max(min_qty, float(flt.get("minQty", 0.0) or 0.0))
            elif flt_type == "MIN_NOTIONAL":
                min_notional = max(min_notional, float(flt.get("notional", 0.0) or 0.0))
        parsed[contract_symbol] = SymbolRule(
            contract_symbol=contract_symbol,
            base_asset=str(row.get("baseAsset", "")).upper(),
            quote_asset=str(row.get("quoteAsset", "")).upper(),
            step_size=step_size or 0.001,
            min_qty=min_qty or step_size or 0.001,
            min_notional=min_notional,
        )
    self._rules = parsed
    return parsed
|
||||
|
||||
@staticmethod
|
||||
def _normalize_qty(rule: SymbolRule, raw_qty: float) -> float:
|
||||
if abs(raw_qty) <= 0:
|
||||
return 0.0
|
||||
sign = 1.0 if raw_qty > 0 else -1.0
|
||||
step = max(rule.step_size, 1e-12)
|
||||
qty = math.floor(abs(raw_qty) / step) * step
|
||||
if qty < max(rule.min_qty, step):
|
||||
return 0.0
|
||||
return sign * qty
|
||||
|
||||
@staticmethod
def _split_contract_symbol(contract_symbol: str) -> tuple[str, str]:
    """Split a contract symbol like "BTCUSDT" into (base, quote).

    Fix: quotes are tried longest-first. KNOWN_QUOTES is iterated as a
    collection whose order is not guaranteed; if it contains both a quote
    and a shorter suffix of it (e.g. "BUSD" and "USD"), the original
    arbitrary-order scan could split "XXXBUSD" into ("XXXB", "USD")
    depending on hash order. Longest-match-first makes the split
    deterministic and correct.

    Falls back to treating the whole string as the base with a USDT quote.
    """
    for quote in sorted(KNOWN_QUOTES, key=len, reverse=True):
        if contract_symbol.endswith(quote):
            return contract_symbol[: -len(quote)], quote
    return contract_symbol, "USDT"
|
||||
129
live/notifier.py
Normal file
129
live/notifier.py
Normal file
@@ -0,0 +1,129 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import urllib.parse
|
||||
import urllib.request
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
@dataclass
class NotifierConfig:
    """Static configuration for the Telegram notifier."""

    # Telegram bot API token used in the sendMessage URL.
    bot_token: str
    # Destination chat id for every notification.
    chat_id: str
    # Minimum severity forwarded to Telegram (see Notifier.LEVELS).
    min_level: str = "INFO"
    # Upper bound on outbound messages per second (enforced in _send_telegram).
    rate_limit_per_sec: float = 5.0
|
||||
|
||||
|
||||
class Notifier:
    """Async Telegram notifier with a priority queue and rate limiting.

    Messages are enqueued by send() and delivered by a single background
    worker task. CRITICAL messages jump the queue; secrets are masked
    before they are enqueued. When no token/chat id is configured the
    notifier is disabled and send() is a no-op returning False.
    """

    # Numeric severity order; levels not listed here never pass the filter.
    LEVELS = {"INFO": 10, "WARNING": 20, "CRITICAL": 30}

    def __init__(self, config: NotifierConfig | None = None) -> None:
        """Create the notifier; a None config produces a disabled instance."""
        self.config = config
        self.enabled = config is not None and bool(config.bot_token) and bool(config.chat_id)
        # Tuple order (priority, enqueue_time, level, text) makes the
        # priority queue FIFO within a priority class.
        self._queue: asyncio.PriorityQueue[tuple[int, float, str, str]] = asyncio.PriorityQueue(maxsize=1000)
        self._worker: asyncio.Task | None = None
        self._stopping = False
        self._last_send_ts = 0.0

    @classmethod
    def from_env(cls) -> "Notifier":
        """Build a notifier from environment variables.

        GAMMA_-prefixed variables take precedence over the plain TELEGRAM_
        names; a missing token or chat id yields a disabled notifier.
        """
        token = (
            os.getenv("GAMMA_TELEGRAM_TOKEN", "").strip()
            or os.getenv("TELEGRAM_BOT_TOKEN", "").strip()
        )
        chat_id = (
            os.getenv("GAMMA_TELEGRAM_CHAT_ID", "").strip()
            or os.getenv("TELEGRAM_CHAT_ID", "").strip()
        )
        min_level = (
            os.getenv("GAMMA_TELEGRAM_MIN_LEVEL", "").strip().upper()
            or os.getenv("TELEGRAM_MIN_LEVEL", "INFO").strip().upper()
            or "INFO"
        )
        if not token or not chat_id:
            return cls(config=None)
        return cls(config=NotifierConfig(bot_token=token, chat_id=chat_id, min_level=min_level))

    async def start(self) -> None:
        """Start the background delivery worker (idempotent; no-op if disabled)."""
        if not self.enabled or self._worker is not None:
            return
        self._stopping = False
        self._worker = asyncio.create_task(self._worker_loop(), name="strategy32-telegram-notifier")

    async def stop(self) -> None:
        """Cancel the delivery worker and wait for it to finish.

        Any messages still queued at this point are dropped.
        """
        self._stopping = True
        if self._worker is None:
            return
        self._worker.cancel()
        try:
            await self._worker
        except asyncio.CancelledError:
            pass
        except Exception:
            pass
        self._worker = None

    async def send(self, level: str, message: str) -> bool:
        """Enqueue a message for delivery; returns True if it was accepted.

        Messages below the configured min level, messages to a disabled
        notifier, and messages arriving while the queue is full are
        dropped (False).
        """
        level = level.upper().strip()
        if not self.enabled:
            return False
        if self.LEVELS.get(level, 0) < self.LEVELS.get(self.config.min_level.upper(), 10):
            return False
        try:
            # CRITICAL gets priority 0 so it is delivered before everything else.
            priority = 0 if level == "CRITICAL" else 1
            self._queue.put_nowait((priority, time.time(), level, _mask_sensitive(message)))
            return True
        except asyncio.QueueFull:
            return False

    async def _worker_loop(self) -> None:
        """Drain the queue forever; delivery failures drop the message silently."""
        while not self._stopping:
            _priority, _ts, level, message = await self._queue.get()
            text = f"[{level}] {message}"
            try:
                await self._send_telegram(text)
            except Exception:
                continue

    async def _send_telegram(self, text: str) -> None:
        """Send one message via the Telegram Bot API, honoring the rate limit.

        Raises RuntimeError (or an urllib error) on failure; the caller
        decides whether to retry or drop.
        """
        # Simple pacing: sleep the remainder of the minimum inter-send gap.
        min_interval = 1.0 / max(1e-6, float(self.config.rate_limit_per_sec))
        elapsed = time.time() - self._last_send_ts
        if elapsed < min_interval:
            await asyncio.sleep(min_interval - elapsed)

        encoded = urllib.parse.urlencode(
            {
                "chat_id": self.config.chat_id,
                "text": text,
                "disable_web_page_preview": "true",
            }
        ).encode("utf-8")
        url = f"https://api.telegram.org/bot{self.config.bot_token}/sendMessage"

        def _call() -> None:
            # Blocking urllib request; run in a thread so the loop stays free.
            req = urllib.request.Request(
                url,
                data=encoded,
                method="POST",
                headers={"Content-Type": "application/x-www-form-urlencoded"},
            )
            with urllib.request.urlopen(req, timeout=10) as resp:
                raw = resp.read().decode("utf-8")
            data = json.loads(raw)
            if not isinstance(data, dict) or not bool(data.get("ok")):
                raise RuntimeError("telegram_send_failed")

        await asyncio.to_thread(_call)
        self._last_send_ts = time.time()
|
||||
|
||||
|
||||
def _mask_sensitive(text: str) -> str:
|
||||
out = str(text)
|
||||
out = re.sub(r"0x[a-fA-F0-9]{64}", "0x***MASKED_PRIVATE_KEY***", out)
|
||||
out = re.sub(r"0x[a-fA-F0-9]{40}", "0x***MASKED_ADDRESS***", out)
|
||||
out = re.sub(r"\b\d{7,}:[A-Za-z0-9_-]{20,}\b", "***MASKED_TOKEN***", out)
|
||||
return out
|
||||
1071
live/runtime.py
Normal file
1071
live/runtime.py
Normal file
File diff suppressed because it is too large
Load Diff
693
research/adverse_regime.py
Normal file
693
research/adverse_regime.py
Normal file
@@ -0,0 +1,693 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import asdict, dataclass
|
||||
import pickle
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from strategy29.backtest.metrics import cagr, max_drawdown, sharpe_ratio
|
||||
from strategy29.common.constants import BTC_SYMBOL
|
||||
from strategy29.common.models import MarketDataBundle
|
||||
from strategy29.data.universe import select_tradeable_universe
|
||||
from strategy32.scripts.run_regime_filter_analysis import build_strategic_regime_frame
|
||||
|
||||
|
||||
@dataclass(slots=True)
class AdverseRegimeEngineSpec:
    """Configuration for one adverse-regime engine variant."""

    # Unique engine name used in reports and result payloads.
    name: str
    # Strategic regime name during which this engine is allowed to trade.
    target_regime: str
    # Weight-builder family dispatched in _target_weights.
    family: str
    # Liquidity floor: minimum average dollar volume for candidate symbols.
    min_avg_dollar_volume: float = 50_000_000.0
    # Re-evaluate target weights every N bars while the regime is active.
    rebalance_bars: int = 6
    # Number of symbols per side/basket.
    top_n: int = 2
    # Proportional cost applied to turnover at each rebalance.
    transaction_cost_pct: float = 0.0015
|
||||
|
||||
|
||||
@dataclass(slots=True)
|
||||
class AdverseRegimeEngineResult:
|
||||
name: str
|
||||
target_regime: str
|
||||
family: str
|
||||
total_return: float
|
||||
cagr: float
|
||||
sharpe: float
|
||||
max_drawdown: float
|
||||
active_bar_ratio: float
|
||||
rebalance_count: int
|
||||
equity_curve: pd.Series
|
||||
|
||||
def to_payload(self) -> dict[str, object]:
|
||||
return {
|
||||
"name": self.name,
|
||||
"target_regime": self.target_regime,
|
||||
"family": self.family,
|
||||
"total_return": self.total_return,
|
||||
"cagr": self.cagr,
|
||||
"sharpe": self.sharpe,
|
||||
"max_drawdown": self.max_drawdown,
|
||||
"active_bar_ratio": self.active_bar_ratio,
|
||||
"rebalance_count": self.rebalance_count,
|
||||
}
|
||||
|
||||
|
||||
def default_engine_specs() -> list[AdverseRegimeEngineSpec]:
    """Return the research grid of engine variants, grouped by target regime.

    Each regime includes a "cash" baseline so every active engine can be
    compared against simply standing aside.
    """
    return [
        # CAPITULATION_STRESS: fast (1-bar) reaction engines for panic flushes.
        AdverseRegimeEngineSpec("cap_cash", "CAPITULATION_STRESS", "cash", rebalance_bars=1),
        AdverseRegimeEngineSpec("cap_btc_rebound", "CAPITULATION_STRESS", "btc_rebound", rebalance_bars=1),
        AdverseRegimeEngineSpec("cap_alt_panic_rebound", "CAPITULATION_STRESS", "alt_panic_rebound", rebalance_bars=1),
        AdverseRegimeEngineSpec("cap_funding_snapback_hedged", "CAPITULATION_STRESS", "funding_snapback_hedged", rebalance_bars=1),
        # CHOPPY_ROTATION: slower market-neutral / carry engines.
        AdverseRegimeEngineSpec("chop_cash", "CHOPPY_ROTATION", "cash", rebalance_bars=1),
        AdverseRegimeEngineSpec("chop_pairs_mean_revert", "CHOPPY_ROTATION", "pairs_mean_revert", rebalance_bars=3),
        AdverseRegimeEngineSpec("chop_quality_rotation", "CHOPPY_ROTATION", "quality_rotation", rebalance_bars=3),
        AdverseRegimeEngineSpec("chop_carry_only", "CHOPPY_ROTATION", "carry_only", rebalance_bars=6),
        AdverseRegimeEngineSpec("chop_rs_spread", "CHOPPY_ROTATION", "rs_spread", rebalance_bars=6),
        AdverseRegimeEngineSpec("chop_btc_hedged_leader", "CHOPPY_ROTATION", "btc_hedged_leader", rebalance_bars=6, top_n=1),
        AdverseRegimeEngineSpec("chop_carry_strict", "CHOPPY_ROTATION", "carry_only_strict", rebalance_bars=6, top_n=1),
        AdverseRegimeEngineSpec("chop_inverse_carry", "CHOPPY_ROTATION", "inverse_carry", rebalance_bars=6, top_n=1),
        AdverseRegimeEngineSpec("chop_inverse_carry_strict", "CHOPPY_ROTATION", "inverse_carry_strict", rebalance_bars=6, top_n=1),
        # DISTRIBUTION_DRIFT: short-biased / spread engines for slow top-outs.
        AdverseRegimeEngineSpec("dist_cash", "DISTRIBUTION_DRIFT", "cash", rebalance_bars=1),
        AdverseRegimeEngineSpec("dist_btc_vs_weak_alt", "DISTRIBUTION_DRIFT", "btc_vs_weak_alt", rebalance_bars=3),
        AdverseRegimeEngineSpec("dist_short_rally", "DISTRIBUTION_DRIFT", "short_rally", rebalance_bars=1),
        AdverseRegimeEngineSpec("dist_weak_basket_short", "DISTRIBUTION_DRIFT", "weak_basket_short", rebalance_bars=3),
        AdverseRegimeEngineSpec("dist_relative_weakness_spread", "DISTRIBUTION_DRIFT", "relative_weakness_spread", rebalance_bars=6),
        AdverseRegimeEngineSpec("dist_btc_rally_short", "DISTRIBUTION_DRIFT", "btc_rally_short", rebalance_bars=1, top_n=1),
        AdverseRegimeEngineSpec("dist_btc_rally_short_strict", "DISTRIBUTION_DRIFT", "btc_rally_short_strict", rebalance_bars=1, top_n=1),
        AdverseRegimeEngineSpec("dist_weak_rally_spread", "DISTRIBUTION_DRIFT", "weak_rally_spread", rebalance_bars=3),
        AdverseRegimeEngineSpec("dist_inverse_carry", "DISTRIBUTION_DRIFT", "inverse_carry", rebalance_bars=6, top_n=1),
        AdverseRegimeEngineSpec("dist_inverse_carry_strict", "DISTRIBUTION_DRIFT", "inverse_carry_strict", rebalance_bars=6, top_n=1),
    ]
|
||||
|
||||
|
||||
def load_fixed66_cache(path: str | Path) -> tuple[MarketDataBundle, pd.Timestamp, list[str]]:
    """Load a pickled research cache: (bundle, latest_bar, accepted_symbols).

    SECURITY NOTE: pickle.loads executes arbitrary code embedded in the
    file — only load caches produced by this project, never untrusted input.
    """
    payload = pickle.loads(Path(path).read_bytes())
    return payload["bundle"], payload["latest_bar"], list(payload["accepted_symbols"])
|
||||
|
||||
|
||||
class AdverseRegimeResearchHarness:
|
||||
def __init__(self, bundle: MarketDataBundle, latest_bar: pd.Timestamp):
    """Index the market-data bundle for fast per-timestamp lookups."""
    self.bundle = bundle
    self.latest_bar = pd.Timestamp(latest_bar)
    # BTC provides the shared bar clock for the whole simulation.
    self.timestamps = sorted(bundle.prices[BTC_SYMBOL]["timestamp"].tolist())
    # Regime frames are expensive to build; cache one per eval_start.
    self._regime_frame_cache: dict[pd.Timestamp, pd.DataFrame] = {}
    # Timestamp-indexed views so weight builders can slice with .loc[:ts].
    self.price_frames = {
        symbol: df.set_index("timestamp")[["close", "volume"]].sort_index()
        for symbol, df in bundle.prices.items()
    }
    self.funding_frames = {
        symbol: df.set_index("timestamp")[["funding_rate", "basis"]].sort_index()
        for symbol, df in bundle.funding.items()
    }
|
||||
|
||||
def build_regime_frame(self, eval_start: pd.Timestamp) -> pd.DataFrame:
    """Build (or fetch from cache) the strategic-regime frame from eval_start on."""
    eval_start = pd.Timestamp(eval_start)
    cached = self._regime_frame_cache.get(eval_start)
    if cached is not None:
        return cached
    # Include 90 days of history before eval_start so the regime
    # indicators are warmed up by the time evaluation begins.
    raw_start = eval_start - pd.Timedelta(days=90)
    sliced = MarketDataBundle(
        prices={symbol: df.loc[df["timestamp"] >= raw_start].copy() for symbol, df in self.bundle.prices.items()},
        funding={symbol: df.loc[df["timestamp"] >= raw_start].copy() for symbol, df in self.bundle.funding.items()},
    )
    frame = build_strategic_regime_frame(sliced, eval_start, self.latest_bar)
    self._regime_frame_cache[eval_start] = frame
    return frame
|
||||
|
||||
def run_engine(
    self,
    spec: AdverseRegimeEngineSpec,
    *,
    eval_start: pd.Timestamp,
    initial_capital: float = 1000.0,
    regime_frame: pd.DataFrame | None = None,
) -> AdverseRegimeEngineResult:
    """Simulate one engine spec bar-by-bar from eval_start.

    Signals are computed on bar i-1 and the resulting weights earn the
    return over bar i (no look-ahead). Turnover is charged at
    spec.transaction_cost_pct whenever weights change. Raises ValueError
    when fewer than 3 bars are available.
    """
    regime_frame = self.build_regime_frame(eval_start) if regime_frame is None else regime_frame
    regime_map = dict(zip(pd.to_datetime(regime_frame["timestamp"]), regime_frame["strategic_regime"]))
    timestamps = [ts for ts in self.timestamps if ts >= eval_start]
    if len(timestamps) < 3:
        raise ValueError("not enough timestamps for adverse regime simulation")

    equity = initial_capital
    equity_points = [pd.Timestamp(timestamps[0])]
    equity_values = [equity]
    current_weights: dict[str, float] = {}
    rebalance_count = 0
    active_bars = 0

    for i in range(1, len(timestamps)):
        signal_ts = timestamps[i - 1]
        execution_ts = timestamps[i]

        # 1) Accrue the return of the weights held over this bar.
        if current_weights:
            bar_ret = self._portfolio_return(current_weights, signal_ts, execution_ts)
            # Equity is floored at zero: a <-100% bar cannot go negative.
            equity *= max(0.0, 1.0 + bar_ret)

        # 2) Decide next weights: flat outside the target regime,
        #    re-optimized every rebalance_bars bars inside it.
        target_weights = current_weights
        regime_name = regime_map.get(signal_ts, "")
        if regime_name != spec.target_regime:
            target_weights = {}
        elif (i - 1) % spec.rebalance_bars == 0:
            target_weights = self._target_weights(spec, signal_ts)
        if target_weights:
            active_bars += 1

        # 3) Charge transaction costs proportional to turnover.
        turnover = self._turnover(current_weights, target_weights)
        if turnover > 0:
            rebalance_count += 1
            equity *= max(0.0, 1.0 - turnover * spec.transaction_cost_pct)
        current_weights = target_weights

        equity_points.append(pd.Timestamp(execution_ts))
        equity_values.append(equity)

    equity_curve = pd.Series(equity_values, index=pd.Index(equity_points, name="timestamp"), dtype=float)
    return AdverseRegimeEngineResult(
        name=spec.name,
        target_regime=spec.target_regime,
        family=spec.family,
        total_return=equity_curve.iloc[-1] / equity_curve.iloc[0] - 1.0,
        cagr=cagr(equity_curve),
        # 6 bars per day is passed as the annualization hint — consistent
        # with the 4h bar timeframe used elsewhere in this strategy.
        sharpe=sharpe_ratio(equity_curve, 6),
        max_drawdown=max_drawdown(equity_curve),
        active_bar_ratio=active_bars / max(len(timestamps) - 1, 1),
        rebalance_count=rebalance_count,
        equity_curve=equity_curve,
    )
|
||||
|
||||
def target_weights(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
    """Public wrapper around _target_weights that normalizes the timestamp."""
    normalized_ts = pd.Timestamp(timestamp)
    return self._target_weights(spec, normalized_ts)
|
||||
|
||||
def _target_weights(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
|
||||
if spec.family == "cash":
|
||||
return {}
|
||||
if spec.family == "btc_rebound":
|
||||
return self._cap_btc_rebound(timestamp)
|
||||
if spec.family == "alt_panic_rebound":
|
||||
return self._cap_alt_panic_rebound(spec, timestamp)
|
||||
if spec.family == "funding_snapback_hedged":
|
||||
return self._cap_funding_snapback_hedged(spec, timestamp)
|
||||
if spec.family == "pairs_mean_revert":
|
||||
return self._chop_pairs_mean_revert(spec, timestamp)
|
||||
if spec.family == "quality_rotation":
|
||||
return self._chop_quality_rotation(spec, timestamp)
|
||||
if spec.family == "carry_only":
|
||||
return self._carry_only(spec, timestamp)
|
||||
if spec.family == "rs_spread":
|
||||
return self._chop_rs_spread(spec, timestamp)
|
||||
if spec.family == "btc_hedged_leader":
|
||||
return self._chop_btc_hedged_leader(spec, timestamp)
|
||||
if spec.family == "carry_only_strict":
|
||||
return self._carry_only_strict(spec, timestamp)
|
||||
if spec.family == "inverse_carry":
|
||||
return self._inverse_carry(spec, timestamp, strict=False)
|
||||
if spec.family == "inverse_carry_strict":
|
||||
return self._inverse_carry(spec, timestamp, strict=True)
|
||||
if spec.family == "btc_vs_weak_alt":
|
||||
return self._dist_btc_vs_weak_alt(spec, timestamp)
|
||||
if spec.family == "short_rally":
|
||||
return self._dist_short_rally(spec, timestamp)
|
||||
if spec.family == "weak_basket_short":
|
||||
return self._dist_weak_basket_short(spec, timestamp)
|
||||
if spec.family == "relative_weakness_spread":
|
||||
return self._dist_relative_weakness_spread(spec, timestamp)
|
||||
if spec.family == "btc_rally_short":
|
||||
return self._dist_btc_rally_short(timestamp)
|
||||
if spec.family == "btc_rally_short_strict":
|
||||
return self._dist_btc_rally_short_strict(timestamp)
|
||||
if spec.family == "weak_rally_spread":
|
||||
return self._dist_weak_rally_spread(spec, timestamp)
|
||||
raise ValueError(f"unsupported family: {spec.family}")
|
||||
|
||||
def _cap_btc_rebound(self, timestamp: pd.Timestamp) -> dict[str, float]:
    """Go 100% long BTC after a deep flush that has started to bounce.

    Entry requires at least -10% over the last 18 bars and a positive
    most-recent bar; otherwise stay in cash ({}).
    """
    hist = self._price_hist(BTC_SYMBOL, timestamp, 24)
    if len(hist) < 19:
        return {}
    ret_3d = self._return_from_hist(hist, 18)
    ret_1b = self._return_from_hist(hist, 1)
    if ret_3d > -0.10 or ret_1b <= 0.0:
        return {}
    return {BTC_SYMBOL: 1.0}
|
||||
|
||||
def _cap_alt_panic_rebound(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
    """Buy the hardest-hit liquid alts that just ticked up after a panic leg.

    Entry requires at least -12% over 18 bars plus a positive last bar.
    The top_n highest scorers are held long, equal weight.
    """
    candidates: list[tuple[float, str]] = []
    for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
        hist = self._price_hist(symbol, timestamp, 24)
        if len(hist) < 19:
            continue
        ret_3d = self._return_from_hist(hist, 18)
        ret_1b = self._return_from_hist(hist, 1)
        funding = self._latest_funding(symbol, timestamp)
        if ret_3d > -0.12 or ret_1b <= 0.0:
            continue
        # Score favors deeper drawdowns, more negative funding (scaled x200
        # to be comparable with the return terms), and a stronger bounce.
        score = (-ret_3d) + max(-funding, 0.0) * 200.0 + ret_1b * 2.0
        candidates.append((score, symbol))
    candidates.sort(reverse=True)
    symbols = [symbol for _, symbol in candidates[: spec.top_n]]
    return self._equal_weight(symbols, 1.0)
|
||||
|
||||
def _cap_funding_snapback_hedged(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
    """Long flushed alts with negative funding, hedged with a 30% BTC short.

    Entry requires negative funding, at least -8% over 18 bars, and a
    positive last bar. The long basket gets 70% gross, equal weight.
    """
    candidates: list[tuple[float, str]] = []
    for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
        hist = self._price_hist(symbol, timestamp, 24)
        if len(hist) < 19:
            continue
        ret_3d = self._return_from_hist(hist, 18)
        ret_1b = self._return_from_hist(hist, 1)
        funding = self._latest_funding(symbol, timestamp)
        if funding >= 0.0 or ret_3d > -0.08 or ret_1b <= 0.0:
            continue
        # Funding dominates the score (x260); drawdown and bounce break ties.
        score = max(-funding, 0.0) * 260.0 + (-ret_3d) * 0.6 + ret_1b
        candidates.append((score, symbol))
    candidates.sort(reverse=True)
    symbols = [symbol for _, symbol in candidates[: spec.top_n]]
    if not symbols:
        return {}
    weights = self._equal_weight(symbols, 0.70)
    # BTC hedge is additive in case BTC itself made the long basket.
    weights[BTC_SYMBOL] = weights.get(BTC_SYMBOL, 0.0) - 0.30
    return weights
|
||||
|
||||
def _chop_pairs_mean_revert(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
    """Market-neutral short-horizon mean reversion among low-vol symbols.

    Longs the top_n biggest 6-bar losers and shorts the top_n biggest
    winners, but only among symbols whose recent 12-bar volatility is in
    (0, 8%]. Requires at least 2*top_n candidates.
    """
    rows: list[tuple[float, float, str]] = []
    for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
        hist = self._price_hist(symbol, timestamp, 18)
        if len(hist) < 7:
            continue
        ret_1d = self._return_from_hist(hist, 6)
        vol = float(hist["close"].pct_change().dropna().tail(12).std(ddof=0))
        # Exclude both dead symbols (vol == 0) and wildly volatile ones.
        if vol <= 0 or vol > 0.08:
            continue
        rows.append((ret_1d, vol, symbol))
    if len(rows) < spec.top_n * 2:
        return {}
    rows.sort(key=lambda row: row[0])
    longs = [symbol for _, _, symbol in rows[: spec.top_n]]
    shorts = [symbol for _, _, symbol in rows[-spec.top_n :]]
    return self._long_short_weights(longs, shorts)
|
||||
|
||||
def _chop_quality_rotation(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
    """Long strong-RS symbols that dipped recently; short the mirror image.

    Relative strength is the 42-bar return minus BTC's; the 6-bar return
    is subtracted on the long side (buy strength on a dip) and added on
    the short side. Shorts overlapping the long list are dropped.
    """
    long_rows: list[tuple[float, str]] = []
    short_rows: list[tuple[float, str]] = []
    btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
    if len(btc_hist) < 43:
        return {}
    btc_ret_7d = self._return_from_hist(btc_hist, 42)
    for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
        hist = self._price_hist(symbol, timestamp, 48)
        if len(hist) < 43:
            continue
        ret_7d = self._return_from_hist(hist, 42)
        ret_1d = self._return_from_hist(hist, 6)
        rs_7d = ret_7d - btc_ret_7d
        long_rows.append((rs_7d - ret_1d, symbol))
        short_rows.append((-rs_7d + ret_1d, symbol))
    long_rows.sort(reverse=True)
    short_rows.sort(reverse=True)
    longs = [symbol for _, symbol in long_rows[: spec.top_n]]
    shorts = [symbol for _, symbol in short_rows[: spec.top_n]]
    # A symbol cannot appear on both sides; longs take precedence.
    shorts = [symbol for symbol in shorts if symbol not in longs]
    return self._long_short_weights(longs, shorts[: spec.top_n])
|
||||
|
||||
def _carry_only(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
    """Hold synthetic funding-carry legs with positive expected edge.

    Edge = mean funding over the last 21 readings (scaled x18) plus a
    basis cushion, minus a fixed cost (0.30%) and a basis-volatility
    penalty. NOTE(review): "carry:<symbol>" keys are presumably resolved
    by the portfolio-return helper into a delta-neutral carry leg rather
    than a directional position — confirm in _portfolio_return.
    """
    candidates: list[tuple[float, str]] = []
    for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
        if symbol not in self.funding_frames:
            continue
        f_hist = self.funding_frames[symbol].loc[:timestamp].tail(21)
        if len(f_hist) < 21:
            continue
        mean_funding = float(f_hist["funding_rate"].mean())
        basis_vol = float(f_hist["basis"].std(ddof=0))
        latest_basis = float(f_hist["basis"].iloc[-1])
        expected_edge = mean_funding * 18 + max(latest_basis, 0.0) * 0.35 - 0.0030 - basis_vol * 1.5
        if expected_edge <= 0:
            continue
        candidates.append((expected_edge, symbol))
    candidates.sort(reverse=True)
    symbols = [symbol for _, symbol in candidates[: spec.top_n]]
    if not symbols:
        return {}
    weight = 1.0 / len(symbols)
    return {f"carry:{symbol}": weight for symbol in symbols}
|
||||
|
||||
def _carry_only_strict(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
    """Stricter variant of _carry_only.

    Same edge formula, but additionally requires the edge to exceed 0.4%
    and at least 75% of the last 21 funding readings to be positive.
    """
    candidates: list[tuple[float, str]] = []
    for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
        if symbol not in self.funding_frames:
            continue
        f_hist = self.funding_frames[symbol].loc[:timestamp].tail(21)
        if len(f_hist) < 21:
            continue
        mean_funding = float(f_hist["funding_rate"].mean())
        basis_vol = float(f_hist["basis"].std(ddof=0))
        latest_basis = float(f_hist["basis"].iloc[-1])
        # Consistency filter: fraction of positive funding prints.
        positive_ratio = float((f_hist["funding_rate"] > 0).mean())
        expected_edge = mean_funding * 18 + max(latest_basis, 0.0) * 0.35 - 0.0030 - basis_vol * 1.5
        if expected_edge <= 0.004 or positive_ratio < 0.75:
            continue
        candidates.append((expected_edge, symbol))
    candidates.sort(reverse=True)
    symbols = [symbol for _, symbol in candidates[: spec.top_n]]
    if not symbols:
        return {}
    return {f"carry:{symbol}": 1.0 / len(symbols) for symbol in symbols}
|
||||
|
||||
def _inverse_carry(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp, *, strict: bool) -> dict[str, float]:
    """Collect carry on the short side when funding is persistently negative.

    Mirrors _carry_only with signs flipped: the edge is built from
    negative mean funding and a negative-basis cushion. strict=True
    raises both the edge floor (0.4% vs 0.1%) and the required fraction
    of negative funding prints (75% vs 60%).
    """
    candidates: list[tuple[float, str]] = []
    min_edge = 0.004 if strict else 0.001
    min_negative_ratio = 0.75 if strict else 0.60
    for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
        if symbol not in self.funding_frames:
            continue
        f_hist = self.funding_frames[symbol].loc[:timestamp].tail(21)
        if len(f_hist) < 21:
            continue
        mean_funding = float(f_hist["funding_rate"].mean())
        negative_ratio = float((f_hist["funding_rate"] < 0).mean())
        basis_vol = float(f_hist["basis"].std(ddof=0))
        latest_basis = float(f_hist["basis"].iloc[-1])
        expected_edge = (-mean_funding) * 18 + max(-latest_basis, 0.0) * 0.35 - 0.0030 - basis_vol * 1.5
        if mean_funding >= 0 or negative_ratio < min_negative_ratio or expected_edge <= min_edge:
            continue
        candidates.append((expected_edge, symbol))
    candidates.sort(reverse=True)
    symbols = [symbol for _, symbol in candidates[: spec.top_n]]
    if not symbols:
        return {}
    return {f"inverse_carry:{symbol}": 1.0 / len(symbols) for symbol in symbols}
|
||||
|
||||
def _chop_rs_spread(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
    """42-bar relative-strength spread: long top RS, short bottom RS.

    Candidates are volatility-filtered (12-bar vol in (0, 10%]) and the
    score is penalized by a quarter of the absolute 6-bar move to avoid
    chasing fresh spikes. Requires at least 2*top_n candidates.
    """
    rows: list[tuple[float, str]] = []
    btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
    if len(btc_hist) < 43:
        return {}
    btc_ret_7d = self._return_from_hist(btc_hist, 42)
    for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
        hist = self._price_hist(symbol, timestamp, 48)
        if len(hist) < 43:
            continue
        ret_7d = self._return_from_hist(hist, 42)
        ret_1d = self._return_from_hist(hist, 6)
        vol = float(hist["close"].pct_change().dropna().tail(12).std(ddof=0))
        if vol <= 0 or vol > 0.10:
            continue
        rows.append((ret_7d - btc_ret_7d - abs(ret_1d) * 0.25, symbol))
    if len(rows) < spec.top_n * 2:
        return {}
    rows.sort(reverse=True)
    longs = [symbol for _, symbol in rows[: spec.top_n]]
    shorts = [symbol for _, symbol in rows[-spec.top_n :]]
    return self._long_short_weights(longs, shorts)
|
||||
|
||||
def _chop_btc_hedged_leader(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
    """70% long the single strongest RS leader, hedged with a 30% BTC short.

    The leader score is 42-bar RS versus BTC, penalized by positive
    funding (x80) and by recent 6-bar noise (x0.15). Trades only when
    the best score is positive.
    """
    btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
    if len(btc_hist) < 43:
        return {}
    btc_ret_7d = self._return_from_hist(btc_hist, 42)
    rows: list[tuple[float, str]] = []
    for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
        hist = self._price_hist(symbol, timestamp, 48)
        if len(hist) < 43:
            continue
        ret_7d = self._return_from_hist(hist, 42)
        ret_1d = self._return_from_hist(hist, 6)
        funding = self._latest_funding(symbol, timestamp)
        rows.append((ret_7d - btc_ret_7d - max(funding, 0.0) * 80.0 - abs(ret_1d) * 0.15, symbol))
    rows.sort(reverse=True)
    if not rows or rows[0][0] <= 0:
        return {}
    leader = rows[0][1]
    return {leader: 0.70, BTC_SYMBOL: -0.30}
|
||||
|
||||
def _dist_btc_vs_weak_alt(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
    """40% long BTC against a 60% short basket of the weakest alts.

    Weakness is ranked by 42-bar RS versus BTC plus the 18-bar return
    (lower = weaker). The short side is split equally across top_n names.
    """
    btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
    if len(btc_hist) < 43:
        return {}
    btc_ret_7d = self._return_from_hist(btc_hist, 42)
    weak_rows: list[tuple[float, str]] = []
    for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
        hist = self._price_hist(symbol, timestamp, 48)
        if len(hist) < 43:
            continue
        ret_7d = self._return_from_hist(hist, 42)
        ret_3d = self._return_from_hist(hist, 18)
        weak_rows.append((ret_7d - btc_ret_7d + ret_3d, symbol))
    weak_rows.sort()
    shorts = [symbol for _, symbol in weak_rows[: spec.top_n]]
    if not shorts:
        return {}
    weights = {BTC_SYMBOL: 0.40}
    short_weight = -0.60 / len(shorts)
    for symbol in shorts:
        weights[symbol] = short_weight
    return weights
|
||||
|
||||
def _dist_short_rally(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
    """Short relatively-weak symbols that just bounced into resistance.

    Entry requires 42-bar relative strength versus BTC below -3%, a
    positive 2-bar bounce, and price still below the 20-bar EMA. The
    top_n strongest setups are shorted equal weight.
    """
    rows: list[tuple[float, str]] = []
    btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
    if len(btc_hist) < 43:
        return {}
    btc_ret_7d = self._return_from_hist(btc_hist, 42)
    for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
        # Fix: request 48 bars (was 36) so the symbol's 7d return can span
        # the same 42-bar window as btc_ret_7d. With only 36 bars the
        # "len(hist) > 42" branch below was dead code and rs compared a
        # ~35-bar symbol return against a 42-bar BTC return.
        hist = self._price_hist(symbol, timestamp, 48)
        if len(hist) < 25:
            continue
        ret_7d = self._return_from_hist(hist, 42 if len(hist) > 42 else len(hist) - 1)
        ret_2b = self._return_from_hist(hist, 2)
        ema20 = hist["close"].ewm(span=20, adjust=False).mean().iloc[-1]
        close = float(hist["close"].iloc[-1])
        rs = ret_7d - btc_ret_7d
        if rs >= -0.03 or ret_2b <= 0.0 or close >= float(ema20):
            continue
        # Deeper weakness and a sharper bounce both improve the setup.
        score = -rs + ret_2b
        rows.append((score, symbol))
    rows.sort(reverse=True)
    shorts = [symbol for _, symbol in rows[: spec.top_n]]
    return {symbol: -1.0 / len(shorts) for symbol in shorts} if shorts else {}
|
||||
|
||||
def _dist_weak_basket_short(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
    """Short-only basket of the weakest alts (distribution-regime engine).

    Ranks liquid alts by 7d return relative to BTC plus 3d return and shorts
    the bottom ``spec.top_n`` with equal weights summing to -1.0.  Empty dict
    when BTC history is insufficient or no candidates qualify.
    """
    btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
    if len(btc_hist) < 43:
        return {}
    btc_ret_7d = self._return_from_hist(btc_hist, 42)
    rows: list[tuple[float, str]] = []
    for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
        hist = self._price_hist(symbol, timestamp, 48)
        if len(hist) < 43:
            continue
        ret_7d = self._return_from_hist(hist, 42)
        ret_3d = self._return_from_hist(hist, 18)
        rs = ret_7d - btc_ret_7d
        # Lower score = weaker name; these sort to the front.
        rows.append((rs + ret_3d, symbol))
    rows.sort()
    shorts = [symbol for _, symbol in rows[: spec.top_n]]
    return {symbol: -1.0 / len(shorts) for symbol in shorts} if shorts else {}
def _dist_relative_weakness_spread(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
    """Long-strongest / short-weakest spread by BTC-relative momentum.

    Score = 7d return relative to BTC plus a quarter-weighted 3d return.
    Requires at least ``2 * spec.top_n`` candidates so both legs can be
    filled without overlap; otherwise returns an empty dict.
    """
    btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
    if len(btc_hist) < 43:
        return {}
    btc_ret_7d = self._return_from_hist(btc_hist, 42)
    rows: list[tuple[float, str]] = []
    for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
        hist = self._price_hist(symbol, timestamp, 48)
        if len(hist) < 43:
            continue
        ret_7d = self._return_from_hist(hist, 42)
        ret_3d = self._return_from_hist(hist, 18)
        rows.append((ret_7d - btc_ret_7d + ret_3d * 0.25, symbol))
    if len(rows) < spec.top_n * 2:
        return {}
    rows.sort(reverse=True)
    # Strongest names go long, weakest go short; weights come from the helper.
    longs = [symbol for _, symbol in rows[: spec.top_n]]
    shorts = [symbol for _, symbol in rows[-spec.top_n :]]
    return self._long_short_weights(longs, shorts)
def _dist_btc_rally_short(self, timestamp: pd.Timestamp) -> dict[str, float]:
    """Fully short BTC when it bounces while still trading below its 20-bar EMA.

    Returns ``{BTC: -1.0}`` when the last 2-bar return is positive and the
    close has not reclaimed the EMA; otherwise an empty dict.
    """
    hist = self._price_hist(BTC_SYMBOL, timestamp, 36)
    if len(hist) < 21:
        return {}
    ret_2b = self._return_from_hist(hist, 2)
    ema20 = hist["close"].ewm(span=20, adjust=False).mean().iloc[-1]
    close = float(hist["close"].iloc[-1])
    # Fade the bounce only while the trend (close < EMA20) is still down.
    if ret_2b <= 0.0 or close >= float(ema20):
        return {}
    return {BTC_SYMBOL: -1.0}
def _dist_btc_rally_short_strict(self, timestamp: pd.Timestamp) -> dict[str, float]:
    """Fully short BTC on a sharp bounce within a confirmed downtrend.

    Stricter variant of `_dist_btc_rally_short`: requires a >= 3.5% 2-bar
    bounce, a ~7d return still below -2%, and price below both the 20- and
    50-bar EMAs.  Returns ``{BTC: -1.0}`` or an empty dict.
    """
    hist = self._price_hist(BTC_SYMBOL, timestamp, 72)
    if len(hist) < 43:
        return {}
    ret_2b = self._return_from_hist(hist, 2)
    ret_7d = self._return_from_hist(hist, 42)
    ema20 = hist["close"].ewm(span=20, adjust=False).mean().iloc[-1]
    ema50 = hist["close"].ewm(span=50, adjust=False).mean().iloc[-1]
    close = float(hist["close"].iloc[-1])
    # Bounce must be sharp AND the medium-term trend still clearly negative.
    if ret_2b < 0.035 or ret_7d > -0.02:
        return {}
    if close >= float(ema20) or close >= float(ema50):
        return {}
    return {BTC_SYMBOL: -1.0}
def _dist_weak_rally_spread(self, spec: AdverseRegimeEngineSpec, timestamp: pd.Timestamp) -> dict[str, float]:
    """Long relative strength / short bouncing weakness (distribution-regime engine).

    Longs: the ``spec.top_n`` names with the highest 7d relative strength vs
    BTC, lightly penalized for 2-bar churn.  Shorts: the weakest names that
    are currently bouncing (positive 2-bar return), excluding any symbol that
    was already picked as a long.
    """
    btc_hist = self._price_hist(BTC_SYMBOL, timestamp, 48)
    if len(btc_hist) < 43:
        return {}
    btc_ret_7d = self._return_from_hist(btc_hist, 42)
    strong_rows: list[tuple[float, str]] = []
    weak_rows: list[tuple[float, str]] = []
    for symbol in self._liquid_symbols(timestamp, spec.min_avg_dollar_volume):
        hist = self._price_hist(symbol, timestamp, 48)
        if len(hist) < 43:
            continue
        ret_7d = self._return_from_hist(hist, 42)
        ret_2b = self._return_from_hist(hist, 2)
        rs = ret_7d - btc_ret_7d
        strong_rows.append((rs - abs(ret_2b) * 0.1, symbol))
        # Only names mid-bounce are eligible to be shorted.
        if ret_2b > 0:
            weak_rows.append((-rs + ret_2b, symbol))
    strong_rows.sort(reverse=True)
    weak_rows.sort(reverse=True)
    longs = [symbol for _, symbol in strong_rows[: spec.top_n]]
    shorts = [symbol for _, symbol in weak_rows[: spec.top_n] if symbol not in longs]
    return self._long_short_weights(longs, shorts[: spec.top_n])
def _liquid_symbols(self, timestamp: pd.Timestamp, min_avg_dollar_volume: float) -> list[str]:
    """Tradeable, sufficiently liquid symbols at *timestamp*, excluding BTC.

    Delegates universe selection (history depth, dollar-volume screen) to
    ``select_tradeable_universe``; BTC is removed because engines treat it
    separately as the benchmark / hedge leg.
    """
    return [
        symbol
        for symbol in select_tradeable_universe(
            self.bundle.prices,
            timestamp,
            min_history_bars=120,
            min_avg_dollar_volume=min_avg_dollar_volume,
        )
        if symbol != BTC_SYMBOL
    ]
def _price_hist(self, symbol: str, timestamp: pd.Timestamp, bars: int) -> pd.DataFrame:
    """Return up to *bars* most recent rows for *symbol* at or before *timestamp*.

    The index is reset so callers can use plain positional access.
    """
    frame = self.price_frames[symbol]
    window = frame.loc[:timestamp].tail(bars)
    return window.reset_index()
def _return_from_hist(self, hist: pd.DataFrame, bars_back: int) -> float:
|
||||
if hist.empty:
|
||||
return 0.0
|
||||
back = min(bars_back, len(hist) - 1)
|
||||
if back <= 0:
|
||||
return 0.0
|
||||
prev_close = float(hist["close"].iloc[-(back + 1)])
|
||||
close = float(hist["close"].iloc[-1])
|
||||
if prev_close <= 0:
|
||||
return 0.0
|
||||
return close / prev_close - 1.0
|
||||
|
||||
def _latest_funding(self, symbol: str, timestamp: pd.Timestamp) -> float:
    """Most recent funding rate for *symbol* at or before *timestamp* (0.0 if unknown)."""
    if symbol not in self.funding_frames:
        return 0.0
    window = self.funding_frames[symbol].loc[:timestamp]
    if window.empty:
        return 0.0
    return float(window["funding_rate"].iloc[-1])
def _portfolio_return(self, weights: dict[str, float], prev_ts: pd.Timestamp, ts: pd.Timestamp) -> float:
    """Weighted one-bar portfolio return between two timestamps.

    Symbols prefixed ``carry:`` / ``inverse_carry:`` are funding-carry legs
    routed to the dedicated return helpers; everything else is treated as a
    plain price position.
    """
    total = 0.0
    for symbol, weight in weights.items():
        if symbol.startswith("carry:"):
            total += weight * self._carry_return(symbol.split(":", 1)[1], prev_ts, ts)
        elif symbol.startswith("inverse_carry:"):
            total += weight * self._inverse_carry_return(symbol.split(":", 1)[1], prev_ts, ts)
        else:
            total += weight * self._price_return(symbol, prev_ts, ts)
    return total
def _price_return(self, symbol: str, prev_ts: pd.Timestamp, ts: pd.Timestamp) -> float:
    """Close-to-close return for *symbol* between two bar timestamps.

    Returns 0.0 when either timestamp is missing from the symbol's frame or
    the base close is non-positive.
    """
    frame = self.price_frames[symbol]
    if prev_ts in frame.index and ts in frame.index:
        base = float(frame.loc[prev_ts, "close"])
        if base > 0:
            return float(frame.loc[ts, "close"]) / base - 1.0
    return 0.0
def _carry_return(self, symbol: str, prev_ts: pd.Timestamp, ts: pd.Timestamp) -> float:
    """Per-bar return of a long-carry position: funding received minus basis change.

    Returns 0.0 when funding data is missing for the symbol or either
    timestamp is absent from its frame.
    """
    if symbol not in self.funding_frames:
        return 0.0
    frame = self.funding_frames[symbol]
    if prev_ts not in frame.index or ts not in frame.index:
        return 0.0
    funding_rate = float(frame.loc[ts, "funding_rate"])
    basis_change = float(frame.loc[ts, "basis"] - frame.loc[prev_ts, "basis"])
    return funding_rate - basis_change
def _inverse_carry_return(self, symbol: str, prev_ts: pd.Timestamp, ts: pd.Timestamp) -> float:
    """Per-bar return of a short-carry position: exact negation of `_carry_return`.

    The original duplicated the carry formula with flipped signs; delegating
    keeps the two legs consistent if the carry calculation ever changes.
    (The missing-data fallback is unchanged: 0.0 negates to 0.0.)
    """
    return -self._carry_return(symbol, prev_ts, ts)
@staticmethod
|
||||
def _turnover(current: dict[str, float], target: dict[str, float]) -> float:
|
||||
symbols = set(current) | set(target)
|
||||
return sum(abs(target.get(symbol, 0.0) - current.get(symbol, 0.0)) for symbol in symbols)
|
||||
|
||||
@staticmethod
|
||||
def _equal_weight(symbols: list[str], gross: float) -> dict[str, float]:
|
||||
if not symbols:
|
||||
return {}
|
||||
weight = gross / len(symbols)
|
||||
return {symbol: weight for symbol in symbols}
|
||||
|
||||
@staticmethod
|
||||
def _long_short_weights(longs: list[str], shorts: list[str]) -> dict[str, float]:
|
||||
weights: dict[str, float] = {}
|
||||
if longs:
|
||||
long_weight = 0.50 / len(longs)
|
||||
for symbol in longs:
|
||||
weights[symbol] = weights.get(symbol, 0.0) + long_weight
|
||||
if shorts:
|
||||
short_weight = -0.50 / len(shorts)
|
||||
for symbol in shorts:
|
||||
weights[symbol] = weights.get(symbol, 0.0) + short_weight
|
||||
return {symbol: weight for symbol, weight in weights.items() if abs(weight) > 1e-9}
|
||||
|
||||
|
||||
def run_adverse_regime_search(
    *,
    cache_path: str | Path,
    eval_days: int = 1825,
    initial_capital: float = 1000.0,
) -> dict[str, object]:
    """Backtest every default adverse-regime engine spec and rank results.

    Loads the cached bundle from *cache_path*, runs each spec over the
    trailing *eval_days*, prints a one-line summary per engine, and returns a
    JSON-serializable payload with per-engine results grouped (best first) by
    target regime.
    """
    bundle, latest_bar, accepted_symbols = load_fixed66_cache(cache_path)
    harness = AdverseRegimeResearchHarness(bundle, latest_bar)
    eval_start = pd.Timestamp(latest_bar) - pd.Timedelta(days=eval_days)

    rows: list[dict[str, object]] = []
    by_regime: dict[str, list[dict[str, object]]] = {}
    for spec in default_engine_specs():
        result = harness.run_engine(spec, eval_start=eval_start, initial_capital=initial_capital)
        payload = result.to_payload()
        print(
            spec.target_regime,
            spec.name,
            f"ret={float(payload['total_return']) * 100:.2f}%",
            f"sharpe={float(payload['sharpe']):.2f}",
            f"mdd={float(payload['max_drawdown']) * 100:.2f}%",
            flush=True,
        )
        rows.append(payload)
        by_regime.setdefault(spec.target_regime, []).append(payload)

    # Within each regime: highest return first, tie-broken by Sharpe, then
    # by shallower drawdown.
    for regime_rows in by_regime.values():
        regime_rows.sort(key=lambda row: (float(row["total_return"]), float(row["sharpe"]), -abs(float(row["max_drawdown"]))), reverse=True)

    return {
        "analysis": "adverse_regime_engine_search",
        "latest_completed_bar": str(latest_bar),
        "accepted_symbols": accepted_symbols,
        "eval_days": eval_days,
        "initial_capital": initial_capital,
        "results": rows,
        "by_regime": by_regime,
    }
326
research/hybrid_regime.py
Normal file
326
research/hybrid_regime.py
Normal file
@@ -0,0 +1,326 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
import json
|
||||
import pickle
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from strategy29.backtest.metrics import cagr, max_drawdown, sharpe_ratio
|
||||
from strategy29.backtest.window_analysis import slice_bundle
|
||||
from strategy29.common.models import AllocationDecision, BacktestResult, MarketDataBundle
|
||||
from strategy32.backtest.simulator import Strategy32Backtester, Strategy32MomentumCarryBacktester, build_engine_config
|
||||
from strategy32.config import PROFILE_V7_DEFAULT, build_strategy32_config
|
||||
from strategy32.research.adverse_regime import AdverseRegimeResearchHarness, default_engine_specs
|
||||
from strategy32.scripts.run_regime_filter_analysis import build_strategic_regime_frame
|
||||
|
||||
|
||||
# Static filter override sets applied on top of the base strategy config for
# the positive-regime components.  The "guarded_*" variants tighten momentum,
# funding, volatility, and beta caps relative to their aggressive counterparts.
STATIC_FILTERS: dict[str, dict[str, float]] = {
    "prev_balanced": {
        "universe_min_avg_dollar_volume": 50_000_000.0,
        "momentum_min_score": 0.60,
        "momentum_min_relative_strength": 0.00,
        "momentum_min_7d_return": 0.00,
        "max_pairwise_correlation": 0.70,
        "carry_min_expected_edge": 0.0,
    },
    # Tighter variant of prev_balanced used when recent conditions look risky.
    "guarded_positive": {
        "universe_min_avg_dollar_volume": 50_000_000.0,
        "momentum_min_score": 0.60,
        "momentum_min_relative_strength": 0.00,
        "momentum_min_7d_return": 0.00,
        "momentum_max_7d_return": 0.35,
        "momentum_min_positive_bar_ratio": 0.52,
        "momentum_max_short_volatility": 0.075,
        "momentum_max_beta": 2.50,
        "momentum_max_latest_funding_rate": 0.00045,
        "max_pairwise_correlation": 0.70,
        "carry_min_expected_edge": 0.0,
    },
    # Looser set for strong/euphoric tape: higher liquidity bar, tolerates
    # slightly negative relative strength and higher correlation.
    "overheat_tolerant": {
        "universe_min_avg_dollar_volume": 100_000_000.0,
        "momentum_min_score": 0.60,
        "momentum_min_relative_strength": -0.02,
        "momentum_min_7d_return": 0.02,
        "max_pairwise_correlation": 0.78,
        "carry_min_expected_edge": 0.0,
    },
    # Tighter variant of overheat_tolerant.
    "guarded_euphoria": {
        "universe_min_avg_dollar_volume": 100_000_000.0,
        "momentum_min_score": 0.62,
        "momentum_min_relative_strength": -0.01,
        "momentum_min_7d_return": 0.02,
        "momentum_max_7d_return": 0.28,
        "momentum_min_positive_bar_ratio": 0.55,
        "momentum_max_short_volatility": 0.070,
        "momentum_max_beta": 2.20,
        "momentum_max_latest_funding_rate": 0.00035,
        "max_pairwise_correlation": 0.72,
        "carry_min_expected_edge": 0.0,
    },
}
||||
# Union of every override key across all filter sets — used to snapshot and
# restore config attributes around a rebalance.
STATIC_FILTER_ATTRS = tuple(sorted({key for overrides in STATIC_FILTERS.values() for key in overrides}))
# Positive strategic regime -> static filter set traded while it is active.
STATIC_COMPONENT_MAP = {
    "MOMENTUM_EXPANSION": "prev_balanced",
    "EUPHORIC_BREAKOUT": "overheat_tolerant",
}
# Adverse strategic regime -> adverse-regime engine name (see default_engine_specs).
ADVERSE_COMPONENT_MAP = {
    "CAPITULATION_STRESS": "cap_btc_rebound",
    "CHOPPY_ROTATION": "chop_inverse_carry_strict",
    "DISTRIBUTION_DRIFT": "dist_inverse_carry_strict",
}
# Regimes the hybrid router treats as adverse.
ADVERSE_REGIMES = {"CAPITULATION_STRESS", "CHOPPY_ROTATION", "DISTRIBUTION_DRIFT"}
||||
@dataclass(slots=True)
class HybridWindowResult:
    """Performance summary of the hybrid backtest over one lookback window."""

    label: str                      # window label, e.g. "1y"
    start: pd.Timestamp             # evaluation start
    end: pd.Timestamp               # evaluation end (latest completed bar)
    total_return: float
    annualized_return: float
    sharpe: float
    max_drawdown: float
    component_map: dict[str, str]   # regime -> filter/engine name used

    def to_payload(self) -> dict[str, object]:
        """JSON-serializable representation (timestamps stringified)."""
        return {
            "start": str(self.start),
            "end": str(self.end),
            "total_return": self.total_return,
            "annualized_return": self.annualized_return,
            "sharpe": self.sharpe,
            "max_drawdown": self.max_drawdown,
            "component_map": self.component_map,
        }
||||
class StrategicRegimeFilterBacktester(Strategy32MomentumCarryBacktester):
    """Backtester that trades only while a single strategic regime is active.

    Outside ``active_regime`` every bar's allocation is forced to 100% cash;
    inside it, the static filter overrides from ``STATIC_FILTERS`` (chosen
    per-bar via ``filter_plan``, falling back to ``default_filter_name``) are
    applied for the duration of each rebalance and then restored.
    """

    def __init__(
        self,
        strategy_config,
        data: MarketDataBundle,
        *,
        trade_start: pd.Timestamp,
        strategic_regime_map: dict[pd.Timestamp, str],
        active_regime: str,
        default_filter_name: str,
        filter_plan: dict[pd.Timestamp, str] | None = None,
    ):
        # Stash routing state before the base-class setup runs, since the
        # base __init__ may trigger code paths that use these attributes.
        self._strategic_regime_map = strategic_regime_map
        self._active_regime = active_regime
        self._default_filter_name = default_filter_name
        self._filter_plan = filter_plan or {}
        super().__init__(strategy_config, data, trade_start=trade_start)

    def _govern_decision(
        self,
        decision: AllocationDecision,
        *,
        signal_timestamp: pd.Timestamp,
        current_equity: float,
        equity_history: list[float],
    ) -> AllocationDecision:
        """Apply base governance, then force all-cash when the regime is inactive."""
        governed = super()._govern_decision(
            decision,
            signal_timestamp=signal_timestamp,
            current_equity=current_equity,
            equity_history=equity_history,
        )
        if self._strategic_regime_map.get(signal_timestamp) != self._active_regime:
            # Not this component's regime: hold cash but keep the regime label.
            return AllocationDecision(
                regime=governed.regime,
                momentum_budget_pct=0.0,
                carry_budget_pct=0.0,
                spread_budget_pct=0.0,
                cash_budget_pct=1.0,
            )
        return governed

    def _rebalance(
        self,
        portfolio,
        signal_timestamp: pd.Timestamp,
        execution_timestamp: pd.Timestamp,
        decision: AllocationDecision,
        rebalance_momentum: bool,
        rebalance_carry: bool,
        rebalance_spread: bool,
    ) -> list:
        """Rebalance with the bar's static filter overrides temporarily applied.

        Original config attributes are snapshotted up front and restored in
        ``finally`` so overrides never leak into subsequent bars, even if the
        base rebalance raises.
        """
        originals = {attr: getattr(self.strategy_config, attr) for attr in STATIC_FILTER_ATTRS}
        try:
            filter_name = self._filter_plan.get(signal_timestamp, self._default_filter_name)
            for attr, value in STATIC_FILTERS[filter_name].items():
                setattr(self.strategy_config, attr, value)
            return super()._rebalance(
                portfolio,
                signal_timestamp,
                execution_timestamp,
                decision,
                rebalance_momentum,
                rebalance_carry,
                rebalance_spread,
            )
        finally:
            for attr, value in originals.items():
                setattr(self.strategy_config, attr, value)
||||
def load_fixed66_bundle(path: str | Path) -> tuple[MarketDataBundle, pd.Timestamp]:
    """Load the cached ``(bundle, latest_bar)`` pair from a pickle file.

    NOTE: pickle is only safe for trusted, locally produced cache files —
    never point this at data from an untrusted source.
    """
    payload = pickle.loads(Path(path).read_bytes())
    bundle = payload["bundle"]
    latest_bar = pd.Timestamp(payload["latest_bar"])
    return bundle, latest_bar
||||
def _run_static_component_curve(
    *,
    sliced: MarketDataBundle,
    latest_bar: pd.Timestamp,
    eval_start: pd.Timestamp,
    regime_map: dict[pd.Timestamp, str],
    active_regime: str,
    filter_name: str,
    filter_plan: dict[pd.Timestamp, str] | None = None,
) -> pd.Series:
    """Equity curve of one static (positive-regime) component over the window.

    Builds a v7 config with the named filter-set overrides, runs the
    regime-gated backtester, and trims the curve to start at *eval_start*.

    NOTE(review): *latest_bar* is currently unused — kept for interface
    stability with the caller.
    """
    cfg = build_strategy32_config(PROFILE_V7_DEFAULT, **STATIC_FILTERS[filter_name])
    backtester = StrategicRegimeFilterBacktester(
        cfg,
        sliced,
        trade_start=eval_start,
        strategic_regime_map=regime_map,
        active_regime=active_regime,
        default_filter_name=filter_name,
        filter_plan=filter_plan,
    )
    # Align starting capital with the engine default used by other components.
    backtester.config.initial_capital = build_engine_config().initial_capital
    result = backtester.run()
    return result.equity_curve.loc[result.equity_curve.index >= eval_start]
||||
def _run_adverse_component_curve(
    *,
    eval_start: pd.Timestamp,
    engine_name: str,
    harness: AdverseRegimeResearchHarness,
    regime_frame: pd.DataFrame,
) -> pd.Series:
    """Equity curve of one adverse-regime engine over the evaluation window.

    Raises StopIteration if *engine_name* does not match any default spec.
    """
    spec = next(spec for spec in default_engine_specs() if spec.name == engine_name)
    result = harness.run_engine(spec, eval_start=eval_start, initial_capital=1000.0, regime_frame=regime_frame)
    return result.equity_curve.loc[result.equity_curve.index >= eval_start]
||||
def _curve_returns(curve: pd.Series) -> pd.Series:
|
||||
return curve.pct_change().fillna(0.0)
|
||||
|
||||
|
||||
def _annualized_return(total_return: float, days: int) -> float:
|
||||
if days <= 0:
|
||||
return 0.0
|
||||
return (1.0 + total_return) ** (365.0 / days) - 1.0
|
||||
|
||||
|
||||
def _build_positive_filter_plan(regime_frame: pd.DataFrame, active_regime: str) -> dict[pd.Timestamp, str]:
    """Choose a per-bar static filter name for a positive regime.

    Uses the rolling 18-bar share of adverse regimes plus a persistence
    column to pick the guarded or aggressive filter variant per timestamp.
    Regimes other than MOMENTUM_EXPANSION / EUPHORIC_BREAKOUT yield an
    empty plan.
    """
    frame = regime_frame.sort_values("timestamp").copy()
    adverse_flags = frame["strategic_regime"].isin(ADVERSE_REGIMES).astype(float)
    frame["is_adverse"] = adverse_flags
    frame["recent_adverse_share"] = frame["is_adverse"].rolling(18, min_periods=1).mean()
    plan: dict[pd.Timestamp, str] = {}
    for row in frame.itertuples(index=False):
        ts = pd.Timestamp(row.timestamp)
        share = float(row.recent_adverse_share)
        if active_regime == "MOMENTUM_EXPANSION":
            use_guarded = share >= 0.40 or float(row.breadth_persist) < 0.58
            plan[ts] = "guarded_positive" if use_guarded else "prev_balanced"
        elif active_regime == "EUPHORIC_BREAKOUT":
            use_guarded = share >= 0.25 or float(row.funding_persist) < 0.72
            plan[ts] = "guarded_euphoria" if use_guarded else "overheat_tolerant"
    return plan
||||
def run_hybrid_backtest(
    *,
    cache_path: str | Path = "/tmp/strategy32_fixed66_bundle.pkl",
    windows: tuple[tuple[int, str], ...] = ((365, "1y"), (730, "2y"), (1095, "3y"), (1460, "4y"), (1825, "5y")),
) -> dict[str, object]:
    """Hybrid regime backtest: stitch per-regime component curves into one curve.

    For each lookback window this builds the strategic regime frame, runs a
    static component per positive regime and an adverse engine per adverse
    regime, then compounds, bar by bar, the return of whichever component is
    selected by the regime at the signal bar.  Bars whose regime has no
    component earn 0.  Returns a JSON-serializable payload keyed by window
    label.
    """
    bundle, latest_bar = load_fixed66_bundle(cache_path)
    payload: dict[str, object] = {
        "analysis": "fixed66_hybrid_regime_backtest",
        "latest_completed_bar": str(latest_bar),
        "static_component_map": STATIC_COMPONENT_MAP,
        "adverse_component_map": ADVERSE_COMPONENT_MAP,
        "results": {},
    }

    for days, label in windows:
        eval_start = latest_bar - pd.Timedelta(days=days)
        # Extra 90 days of history so indicators are warm by eval_start.
        raw_start = eval_start - pd.Timedelta(days=90)
        sliced = slice_bundle(bundle, raw_start, latest_bar)
        regime_frame = build_strategic_regime_frame(sliced, eval_start, latest_bar)
        regime_map = dict(zip(pd.to_datetime(regime_frame["timestamp"]), regime_frame["strategic_regime"]))
        harness = AdverseRegimeResearchHarness(sliced, latest_bar)

        component_curves: dict[str, pd.Series] = {}
        for regime_name, filter_name in STATIC_COMPONENT_MAP.items():
            filter_plan = _build_positive_filter_plan(regime_frame, regime_name)
            component_curves[regime_name] = _run_static_component_curve(
                sliced=sliced,
                latest_bar=latest_bar,
                eval_start=eval_start,
                regime_map=regime_map,
                active_regime=regime_name,
                filter_name=filter_name,
                filter_plan=filter_plan,
            )
        for regime_name, engine_name in ADVERSE_COMPONENT_MAP.items():
            component_curves[regime_name] = _run_adverse_component_curve(
                eval_start=eval_start,
                engine_name=engine_name,
                harness=harness,
                regime_frame=regime_frame,
            )

        return_frames = {name: _curve_returns(curve) for name, curve in component_curves.items()}
        # BTC's bar grid defines the master timeline for the stitched curve.
        timestamps = sorted(sliced.prices["BTC"]["timestamp"].loc[sliced.prices["BTC"]["timestamp"] >= eval_start].tolist())
        equity = 1000.0
        equity_idx = [timestamps[0]]
        equity_values = [equity]

        for i in range(1, len(timestamps)):
            signal_ts = timestamps[i - 1]
            execution_ts = timestamps[i]
            # Regime at the signal bar selects which component earns the next bar;
            # unknown regimes fall through to a zero return.
            regime_name = regime_map.get(signal_ts, "")
            ret = float(return_frames.get(regime_name, pd.Series(dtype=float)).get(execution_ts, 0.0))
            equity *= max(0.0, 1.0 + ret)  # equity floored at zero
            equity_idx.append(execution_ts)
            equity_values.append(equity)

        equity_curve = pd.Series(equity_values, index=pd.Index(equity_idx, name="timestamp"), dtype=float)
        total_return = float(equity_curve.iloc[-1] / equity_curve.iloc[0] - 1.0)
        payload["results"][label] = HybridWindowResult(
            label=label,
            start=pd.Timestamp(eval_start),
            end=pd.Timestamp(latest_bar),
            total_return=total_return,
            annualized_return=_annualized_return(total_return, days),
            sharpe=sharpe_ratio(equity_curve, 6),
            max_drawdown=max_drawdown(equity_curve),
            component_map={
                **{regime: filter_name for regime, filter_name in STATIC_COMPONENT_MAP.items()},
                **{regime: engine_name for regime, engine_name in ADVERSE_COMPONENT_MAP.items()},
            },
        ).to_payload()
        print(
            label,
            f"ret={total_return * 100:.2f}%",
            f"ann={payload['results'][label]['annualized_return'] * 100:.2f}%",
            f"sharpe={payload['results'][label]['sharpe']:.2f}",
            f"mdd={payload['results'][label]['max_drawdown'] * 100:.2f}%",
            flush=True,
        )
    return payload
||||
def write_hybrid_backtest(out_path: str | Path = "/tmp/strategy32_hybrid_regime_backtest.json") -> Path:
    """Run the hybrid backtest and persist the payload as pretty-printed JSON."""
    destination = Path(out_path)
    destination.write_text(json.dumps(run_hybrid_backtest(), indent=2), encoding="utf-8")
    return destination
848
research/soft_router.py
Normal file
848
research/soft_router.py
Normal file
@@ -0,0 +1,848 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import multiprocessing as mp
|
||||
from concurrent.futures import ProcessPoolExecutor, as_completed
|
||||
from dataclasses import asdict, dataclass
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from strategy29.backtest.metrics import max_drawdown, sharpe_ratio
|
||||
from strategy29.backtest.window_analysis import slice_bundle
|
||||
from strategy32.research.adverse_regime import AdverseRegimeResearchHarness, default_engine_specs
|
||||
from strategy32.research.hybrid_regime import (
|
||||
STATIC_FILTERS,
|
||||
_curve_returns,
|
||||
_run_adverse_component_curve,
|
||||
load_fixed66_bundle,
|
||||
)
|
||||
from strategy32.backtest.simulator import Strategy32Backtester
|
||||
from strategy32.config import PROFILE_V7_DEFAULT, build_strategy32_config
|
||||
from strategy32.scripts.run_regime_filter_analysis import STRATEGIC_REGIME_PROFILES, build_strategic_regime_frame
|
||||
|
||||
|
||||
@dataclass(frozen=True, slots=True)
class SoftRouterCandidate:
    """One soft-router parameter combination evaluated by the grid search."""

    regime_profile: str     # strategic-regime profile name
    core_filter: str        # static filter set for the core sleeve
    cap_engine: str         # adverse engine for capitulation stress
    chop_engine: str        # adverse engine for choppy rotation
    dist_engine: str        # adverse engine for distribution drift
    core_floor: float       # minimum blend weight kept on the core sleeve
    cap_max_weight: float
    chop_max_weight: float
    dist_max_weight: float
    chop_blend_floor: float

    @property
    def name(self) -> str:
        """Stable human-readable identifier encoding every parameter."""
        return (
            f"{self.regime_profile}"
            f"|core:{self.core_filter}"
            f"|cap:{self.cap_engine}"
            f"|chop:{self.chop_engine}"
            f"|dist:{self.dist_engine}"
            f"|floor:{self.core_floor:.2f}"
            f"|capw:{self.cap_max_weight:.2f}"
            f"|chopw:{self.chop_max_weight:.2f}"
            f"|distw:{self.dist_max_weight:.2f}"
            f"|chopf:{self.chop_blend_floor:.2f}"
        )
||||
@dataclass(frozen=True, slots=True)
class CashOverlayCandidate:
    """Parameter set for the cash-overlay variant of the router search.

    Instead of blending adverse engines, this variant moves a fixed share of
    the book to cash whenever an adverse-regime score crosses its threshold.
    """

    regime_profile: str
    core_filter: str
    cap_engine: str
    chop_engine: str
    dist_engine: str
    cap_cash_weight: float      # cash share applied on capitulation signal
    chop_cash_weight: float     # cash share applied on chop signal
    dist_cash_weight: float     # cash share applied on distribution signal
    cap_threshold: float        # score threshold triggering the cap overlay
    chop_threshold: float
    dist_threshold: float
    core_block_threshold: float # score above which the core sleeve is blocked

    @property
    def name(self) -> str:
        """Stable human-readable identifier encoding every parameter."""
        return (
            f"{self.regime_profile}"
            f"|core:{self.core_filter}"
            f"|cap:{self.cap_engine}"
            f"|chop:{self.chop_engine}"
            f"|dist:{self.dist_engine}"
            f"|capcw:{self.cap_cash_weight:.2f}"
            f"|chopcw:{self.chop_cash_weight:.2f}"
            f"|distcw:{self.dist_cash_weight:.2f}"
            f"|capth:{self.cap_threshold:.2f}"
            f"|chopth:{self.chop_threshold:.2f}"
            f"|distth:{self.dist_threshold:.2f}"
            f"|block:{self.core_block_threshold:.2f}"
        )
||||
@dataclass(frozen=True, slots=True)
class MacroScaleSpec:
    """Parameters for a macro exposure scaler driven by weekly trend gaps."""

    floor: float            # minimum exposure multiplier
    close_gap_start: float  # gap where close-based scaling starts
    close_gap_full: float   # gap where close-based scaling saturates
    fast_gap_start: float   # gap where fast-EMA scaling starts
    fast_gap_full: float    # gap where fast-EMA scaling saturates
    close_weight: float = 0.60  # blend weight of the close-gap signal
    fast_weeks: int = 10        # fast EMA length in weeks
    slow_weeks: int = 30        # slow EMA length in weeks

    @property
    def name(self) -> str:
        """Stable human-readable identifier encoding the scaling parameters."""
        return (
            f"floor:{self.floor:.2f}"
            f"|close:{self.close_gap_start:.3f}->{self.close_gap_full:.3f}"
            f"|fast:{self.fast_gap_start:.3f}->{self.fast_gap_full:.3f}"
            f"|w:{self.close_weight:.2f}"
        )
||||
# Rolling evaluation windows: (lookback in days, label).
WINDOWS = (
    (365, "1y"),
    (730, "2y"),
    (1095, "3y"),
    (1460, "4y"),
    (1825, "5y"),
)
||||
# Calendar-year segments for per-year metrics: (label, start, end), UTC.
# 2021 starts mid-March, presumably the earliest bar in the dataset — confirm.
YEAR_PERIODS = (
    ("2021", pd.Timestamp("2021-03-16 04:00:00+00:00"), pd.Timestamp("2022-01-01 00:00:00+00:00")),
    ("2022", pd.Timestamp("2022-01-01 00:00:00+00:00"), pd.Timestamp("2023-01-01 00:00:00+00:00")),
    ("2023", pd.Timestamp("2023-01-01 00:00:00+00:00"), pd.Timestamp("2024-01-01 00:00:00+00:00")),
    ("2024", pd.Timestamp("2024-01-01 00:00:00+00:00"), pd.Timestamp("2025-01-01 00:00:00+00:00")),
    ("2025", pd.Timestamp("2025-01-01 00:00:00+00:00"), pd.Timestamp("2026-01-01 00:00:00+00:00")),
)
||||
# Start of the year-to-date segment (UTC).
YTD_START = pd.Timestamp("2026-01-01 00:00:00+00:00")
||||
def _clip01(value: float) -> float:
|
||||
return min(max(float(value), 0.0), 1.0)
|
||||
|
||||
|
||||
def _ramp(value: float, start: float, end: float) -> float:
|
||||
if end == start:
|
||||
return 1.0 if value >= end else 0.0
|
||||
if value <= start:
|
||||
return 0.0
|
||||
if value >= end:
|
||||
return 1.0
|
||||
return (value - start) / (end - start)
|
||||
|
||||
|
||||
def _inverse_ramp(value: float, start: float, end: float) -> float:
|
||||
if end == start:
|
||||
return 1.0 if value <= end else 0.0
|
||||
if value >= start:
|
||||
return 0.0
|
||||
if value <= end:
|
||||
return 1.0
|
||||
return (start - value) / (start - end)
|
||||
|
||||
|
||||
def build_regime_score_frame(
    bundle,
    eval_start: pd.Timestamp,
    eval_end: pd.Timestamp,
    *,
    profile_name: str,
) -> pd.DataFrame:
    """Extend the strategic regime frame with soft [0, 1] scores per regime.

    For every bar, turns the hard regime thresholds of the named profile into
    graded scores via linear ramps: panic, euphoria, expansion, distribution,
    choppy, plus a ``core_score`` (max of expansion and euphoria).  All scores
    are clipped to [0, 1] and appended as new columns on a copy of the frame.
    """
    profile = STRATEGIC_REGIME_PROFILES[profile_name]
    frame = build_strategic_regime_frame(bundle, eval_start, eval_end, profile=profile).copy()

    panic_scores: list[float] = []
    euphoria_scores: list[float] = []
    expansion_scores: list[float] = []
    distribution_scores: list[float] = []
    choppy_scores: list[float] = []
    core_scores: list[float] = []

    for row in frame.itertuples(index=False):
        # Pull raw indicator values; NaN persistence/trend columns fall back
        # to a neutral substitute so scores stay defined early in the window.
        breadth = float(row.breadth)
        breadth_persist = float(row.breadth_persist) if pd.notna(row.breadth_persist) else breadth
        atr = float(row.atr_pct) if pd.notna(row.atr_pct) else 0.0
        bar_ret = float(row.bar_return) if pd.notna(row.bar_return) else 0.0
        daily_gap = float(row.daily_trend_gap) if pd.notna(row.daily_trend_gap) else 0.0
        intra_gap = float(row.intraday_trend_gap) if pd.notna(row.intraday_trend_gap) else 0.0
        avg_funding = float(row.mean_alt_funding)
        positive_ratio = float(row.positive_funding_ratio)
        funding_persist = float(row.funding_persist) if pd.notna(row.funding_persist) else positive_ratio
        btc_7d = float(row.btc_7d_return)

        # Panic: any single strong trigger suffices (max), except the
        # breadth+funding pair which must co-occur (min).
        panic_score = max(
            _ramp(atr, profile.panic_atr * 0.85, profile.panic_atr * 1.35),
            _ramp(-bar_ret, abs(profile.panic_bar_return) * 0.75, abs(profile.panic_bar_return) * 1.35),
            min(
                _ramp(profile.panic_breadth - breadth, 0.0, max(profile.panic_breadth, 0.15)),
                _ramp(profile.panic_funding - avg_funding, 0.0, abs(profile.panic_funding) + 0.00015),
            ),
        )

        # Euphoria: average of trend, breadth, and funding ramps; the last
        # component takes the stronger of funding or BTC momentum.
        euphoria_components = [
            _ramp(daily_gap, profile.euphoria_daily_gap * 0.75, profile.euphoria_daily_gap * 1.6),
            _ramp(intra_gap, profile.euphoria_intraday_gap * 0.6, profile.euphoria_intraday_gap * 1.8),
            _ramp(breadth, profile.euphoria_breadth - 0.05, min(profile.euphoria_breadth + 0.12, 0.95)),
            _ramp(breadth_persist, profile.euphoria_breadth_persist - 0.06, min(profile.euphoria_breadth_persist + 0.12, 0.95)),
            _ramp(positive_ratio, profile.euphoria_positive_ratio - 0.08, min(profile.euphoria_positive_ratio + 0.12, 0.98)),
            _ramp(funding_persist, profile.euphoria_funding_persist - 0.08, min(profile.euphoria_funding_persist + 0.12, 0.98)),
            max(
                _ramp(avg_funding, profile.euphoria_funding * 0.5, max(profile.euphoria_funding * 2.0, profile.euphoria_funding + 0.00008)),
                _ramp(btc_7d, profile.euphoria_btc_7d * 0.6, max(profile.euphoria_btc_7d * 1.8, profile.euphoria_btc_7d + 0.08)),
            ),
        ]
        euphoria_score = sum(euphoria_components) / len(euphoria_components)

        # Expansion: healthy up-trend with contained volatility; damped when
        # euphoria is already high so the two scores stay distinct.
        expansion_components = [
            _ramp(daily_gap, max(profile.expansion_daily_gap - 0.02, -0.02), profile.expansion_daily_gap + 0.06),
            _ramp(intra_gap, profile.expansion_intraday_gap - 0.01, profile.expansion_intraday_gap + 0.05),
            _ramp(breadth, profile.expansion_breadth - 0.06, min(profile.expansion_breadth + 0.14, 0.92)),
            _ramp(breadth_persist, profile.expansion_breadth_persist - 0.06, min(profile.expansion_breadth_persist + 0.14, 0.92)),
            _inverse_ramp(atr, profile.expansion_atr * 1.10, max(profile.expansion_atr * 0.60, 0.015)),
            _ramp(avg_funding, profile.expansion_min_funding - 0.00005, profile.expansion_min_funding + 0.00015),
            _ramp(btc_7d, profile.expansion_btc_7d - 0.04, profile.expansion_btc_7d + 0.10),
        ]
        expansion_score = sum(expansion_components) / len(expansion_components)
        expansion_score *= 1.0 - 0.55 * euphoria_score

        # Distribution: fading trend/breadth/funding; damped by panic so
        # crash bars register as panic rather than distribution.
        distribution_components = [
            max(
                _ramp(profile.distribution_daily_gap - daily_gap, 0.0, abs(profile.distribution_daily_gap) + 0.05),
                _ramp(profile.distribution_intraday_gap - intra_gap, 0.0, abs(profile.distribution_intraday_gap) + 0.04),
            ),
            _ramp(profile.distribution_breadth - breadth, 0.0, max(profile.distribution_breadth, 0.18)),
            _ramp(profile.distribution_positive_ratio - positive_ratio, 0.0, max(profile.distribution_positive_ratio, 0.18)),
            _ramp(-avg_funding, 0.0, 0.00020),
        ]
        distribution_score = sum(distribution_components) / len(distribution_components)
        distribution_score *= 1.0 - 0.35 * panic_score

        # Choppy: no trend, breadth near 50%, funding near zero; suppressed
        # when any directional regime score is strong.
        trendlessness = 1.0 - max(
            _clip01(abs(daily_gap) / max(profile.euphoria_daily_gap, 0.03)),
            _clip01(abs(intra_gap) / max(profile.euphoria_intraday_gap, 0.015)),
        )
        centered_breadth = 1.0 - min(abs(breadth - 0.5) / 0.30, 1.0)
        funding_neutral = 1.0 - min(abs(avg_funding) / 0.00012, 1.0)
        choppy_score = (trendlessness + centered_breadth + funding_neutral) / 3.0
        choppy_score *= 1.0 - max(euphoria_score, expansion_score, distribution_score, panic_score) * 0.65
        choppy_score = max(choppy_score, 0.0)

        # Core = willingness to run the long core sleeve at all.
        core_score = max(expansion_score, euphoria_score)

        panic_scores.append(_clip01(panic_score))
        euphoria_scores.append(_clip01(euphoria_score))
        expansion_scores.append(_clip01(expansion_score))
        distribution_scores.append(_clip01(distribution_score))
        choppy_scores.append(_clip01(choppy_score))
        core_scores.append(_clip01(core_score))

    frame["panic_score"] = panic_scores
    frame["euphoria_score"] = euphoria_scores
    frame["expansion_score"] = expansion_scores
    frame["distribution_score"] = distribution_scores
    frame["choppy_score"] = choppy_scores
    frame["core_score"] = core_scores
    return frame
|
||||
def _annualized_return(total_return: float, days: int) -> float:
|
||||
if days <= 0:
|
||||
return 0.0
|
||||
return (1.0 + total_return) ** (365.0 / days) - 1.0
|
||||
|
||||
|
||||
def _zero_segment_metrics(start: pd.Timestamp, end: pd.Timestamp) -> dict[str, float]:
    """All-zero metrics payload for segments that cannot be evaluated."""
    return {
        "start": str(start),
        "end": str(end),
        "total_return": 0.0,
        "annualized_return": 0.0,
        "sharpe": 0.0,
        "max_drawdown": 0.0,
    }


def segment_metrics(curve: pd.Series, start: pd.Timestamp, end: pd.Timestamp) -> dict[str, float]:
    """Compute return/risk metrics for the slice of ``curve`` inside [start, end].

    Args:
        curve: Equity curve indexed by timestamp.
        start: Inclusive segment start.
        end: Inclusive segment end.

    Returns:
        Dict with start/end (as strings), total and annualized return,
        Sharpe ratio, and max drawdown. Segments with fewer than two points,
        or a non-positive starting value, return all-zero metrics instead of
        raising.
    """
    segment = curve.loc[(curve.index >= start) & (curve.index <= end)].copy()
    if len(segment) < 2:
        # Not enough observations to measure a return.
        return _zero_segment_metrics(start, end)
    base = float(segment.iloc[0])
    if base <= 0:
        # A non-positive starting equity cannot be normalized meaningfully.
        return _zero_segment_metrics(start, end)
    # Rebase to 1000 so downstream ratio metrics see a consistent scale.
    normalized = segment / base * 1000.0
    total_return = float(normalized.iloc[-1] / normalized.iloc[0] - 1.0)
    # Use the requested calendar span (not the observed bars) for annualization.
    days = max(int((end - start) / pd.Timedelta(days=1)), 1)
    return {
        "start": str(start),
        "end": str(end),
        "total_return": total_return,
        "annualized_return": _annualized_return(total_return, days),
        "sharpe": sharpe_ratio(normalized, 6),
        "max_drawdown": max_drawdown(normalized),
    }
|
||||
|
||||
|
||||
def score_candidate(window_results: dict[str, dict[str, float]], year_results: dict[str, dict[str, float]]) -> tuple[float, int, int]:
    """Score a candidate from its per-window and per-year metrics.

    The score rewards annualized return (longest window weighted most) and
    Sharpe, rewards the worst and the positive yearly returns, and penalizes
    negative years plus drawdowns beyond -20%.

    Returns:
        (score, number of negative years, number of windows breaching -20% MDD).
    """
    yearly = [float(m["total_return"]) for m in year_results.values()]
    negative_years = sum(1 for r in yearly if r < 0)
    mdd_violations = sum(1 for m in window_results.values() if float(m["max_drawdown"]) < -0.20)

    # Weight tables; insertion order matches the original accumulation order.
    annual_weights = {"5y": 4.5, "1y": 2.0, "2y": 1.4, "4y": 1.0, "3y": 0.6}
    sharpe_weights = {"5y": 1.3, "1y": 0.6}

    score = 0.0
    for label, weight in annual_weights.items():
        score += weight * float(window_results[label]["annualized_return"])
    for label, weight in sharpe_weights.items():
        score += weight * float(window_results[label]["sharpe"])
    score += 2.5 * min(yearly)
    score += 0.7 * sum(max(r, 0.0) for r in yearly)
    score -= 3.25 * negative_years
    score -= 0.9 * mdd_violations
    # Extra linear penalty for drawdown depth beyond the -20% tolerance.
    for label in ("1y", "2y", "3y", "4y", "5y"):
        excess = abs(float(window_results[label]["max_drawdown"])) - 0.20
        if excess > 0.0:
            score -= excess * 5.0
    return score, negative_years, mdd_violations
|
||||
|
||||
|
||||
def load_component_bundle(cache_path: str | None = None) -> tuple[object, pd.Timestamp]:
    """Load the fixed-66 component bundle, defaulting to the shared /tmp cache.

    Returns the bundle object and its latest bar timestamp, as produced by
    ``load_fixed66_bundle``.
    """
    resolved_path = cache_path or "/tmp/strategy32_fixed66_bundle.pkl"
    return load_fixed66_bundle(resolved_path)
|
||||
|
||||
|
||||
def compose_soft_router_curve(
    *,
    timestamps: list[pd.Timestamp],
    score_frame: pd.DataFrame,
    core_returns: pd.Series,
    cap_returns: pd.Series,
    chop_returns: pd.Series,
    dist_returns: pd.Series,
    candidate: SoftRouterCandidate,
) -> tuple[pd.Series, pd.DataFrame]:
    """Blend the core engine with the panic/chop/distribution overlays.

    For each bar, regime scores observed at the *previous* bar (signal bar)
    determine the sleeve weights; the blended return is realized at the
    current bar (execution bar), so no look-ahead is introduced.

    Returns:
        (equity curve starting at 1000.0 indexed by timestamp,
         per-bar DataFrame of weights, scores, and the blended return).
    """
    score_map = score_frame.set_index("timestamp")[
        ["core_score", "panic_score", "choppy_score", "distribution_score"]
    ].sort_index()

    equity = 1000.0
    idx = [timestamps[0]]
    vals = [equity]
    rows: list[dict[str, float | str]] = []
    for i in range(1, len(timestamps)):
        # Weights come from bar i-1 scores; returns are applied at bar i.
        signal_ts = pd.Timestamp(timestamps[i - 1])
        execution_ts = pd.Timestamp(timestamps[i])
        score_row = score_map.loc[signal_ts] if signal_ts in score_map.index else None
        if score_row is None:
            # No score for this signal bar (e.g. warmup): all scores zero.
            core_score = panic_score = choppy_score = distribution_score = 0.0
        else:
            core_score = float(score_row["core_score"])
            panic_score = float(score_row["panic_score"])
            choppy_score = float(score_row["choppy_score"])
            distribution_score = float(score_row["distribution_score"])

        # Sleeve weights scale with their regime scores; panic dampens the
        # distribution and chop sleeves so the cap sleeve dominates in crashes.
        cap_weight = candidate.cap_max_weight * panic_score
        dist_weight = candidate.dist_max_weight * distribution_score * (1.0 - 0.60 * panic_score)
        chop_signal = max(choppy_score, candidate.chop_blend_floor * (1.0 - core_score))
        chop_weight = candidate.chop_max_weight * chop_signal * (1.0 - 0.45 * panic_score)

        # Cap the combined overlay allocation at 90% of the book.
        overlay_weight = cap_weight + dist_weight + chop_weight
        if overlay_weight > 0.90:
            scale = 0.90 / overlay_weight
            cap_weight *= scale
            dist_weight *= scale
            chop_weight *= scale
            overlay_weight = 0.90

        # Core gets at least core_floor of the residual budget, growing with
        # its own score; renormalize if the total would exceed 100%.
        core_target = candidate.core_floor + (1.0 - candidate.core_floor) * core_score
        core_weight = max(0.0, core_target * (1.0 - overlay_weight))
        total_weight = core_weight + cap_weight + chop_weight + dist_weight
        if total_weight > 1.0:
            scale = 1.0 / total_weight
            core_weight *= scale
            cap_weight *= scale
            chop_weight *= scale
            dist_weight *= scale

        # Missing return observations for a timestamp contribute 0.0.
        bar_ret = (
            core_weight * float(core_returns.get(execution_ts, 0.0))
            + cap_weight * float(cap_returns.get(execution_ts, 0.0))
            + chop_weight * float(chop_returns.get(execution_ts, 0.0))
            + dist_weight * float(dist_returns.get(execution_ts, 0.0))
        )
        # Floor at zero so a catastrophic bar cannot drive equity negative.
        equity *= max(0.0, 1.0 + bar_ret)
        idx.append(execution_ts)
        vals.append(equity)
        rows.append(
            {
                "timestamp": execution_ts,
                "core_weight": core_weight,
                "cap_weight": cap_weight,
                "chop_weight": chop_weight,
                "dist_weight": dist_weight,
                "cash_weight": max(0.0, 1.0 - core_weight - cap_weight - chop_weight - dist_weight),
                "core_score": core_score,
                "panic_score": panic_score,
                "choppy_score": choppy_score,
                "distribution_score": distribution_score,
                "portfolio_return": bar_ret,
            }
        )
    curve = pd.Series(vals, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
    weights = pd.DataFrame(rows)
    return curve, weights
|
||||
|
||||
|
||||
def build_period_components(
    *,
    bundle,
    eval_start: pd.Timestamp,
    eval_end: pd.Timestamp,
    profile_name: str,
    core_filter: str,
    cap_engine: str,
    chop_engine: str,
    dist_engine: str,
) -> dict[str, object]:
    """Run the core backtest and the three overlay engines for one period.

    The bundle is sliced with a 90-day pre-roll before ``eval_start`` so
    indicators have warmup history; all returned series are trimmed back to
    start at ``eval_start``.

    Returns:
        Dict with the regime score frame, the evaluation-window bar
        timestamps (from BTC), and bar-return series for the core, cap,
        chop, and distribution engines.
    """
    raw_start = eval_start - pd.Timedelta(days=90)  # indicator warmup pre-roll
    sliced = slice_bundle(bundle, raw_start, eval_end)
    score_frame = build_regime_score_frame(sliced, eval_start, eval_end, profile_name=profile_name)
    regime_frame = score_frame.copy()
    harness = AdverseRegimeResearchHarness(sliced, eval_end)

    # Core engine: the V7 profile with the selected static filter preset.
    cfg = build_strategy32_config(PROFILE_V7_DEFAULT, **STATIC_FILTERS[core_filter])
    backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
    backtester.engine_config.initial_capital = 1000.0
    core_curve = backtester.run().equity_curve.loc[lambda s: s.index >= eval_start]
    # Overlay engines all share the same harness and regime frame.
    cap_curve = _run_adverse_component_curve(
        eval_start=eval_start,
        engine_name=cap_engine,
        harness=harness,
        regime_frame=regime_frame,
    )
    chop_curve = _run_adverse_component_curve(
        eval_start=eval_start,
        engine_name=chop_engine,
        harness=harness,
        regime_frame=regime_frame,
    )
    dist_curve = _run_adverse_component_curve(
        eval_start=eval_start,
        engine_name=dist_engine,
        harness=harness,
        regime_frame=regime_frame,
    )
    # BTC bars define the common execution timeline for all engines.
    timestamps = sorted(sliced.prices["BTC"]["timestamp"].loc[sliced.prices["BTC"]["timestamp"] >= eval_start].tolist())

    return {
        "score_frame": score_frame,
        "timestamps": timestamps,
        "core_returns": _curve_returns(core_curve),
        "cap_returns": _curve_returns(cap_curve),
        "chop_returns": _curve_returns(chop_curve),
        "dist_returns": _curve_returns(dist_curve),
    }
|
||||
|
||||
|
||||
def build_cash_overlay_period_components(
    *,
    bundle,
    eval_start: pd.Timestamp,
    eval_end: pd.Timestamp,
    profile_name: str,
    core_filter: str,
    cap_engine: str,
    chop_engine: str,
    dist_engine: str,
    core_config_overrides: dict[str, object] | None = None,
    macro_scale_spec: MacroScaleSpec | None = None,
) -> dict[str, object]:
    """Build inputs for ``compose_cash_overlay_curve`` over one period.

    Like ``build_period_components`` but additionally captures the core
    engine's per-bar cash exposure (so overlays can deploy only idle cash)
    and, when ``macro_scale_spec`` is given, a weekly macro scaling series.

    Returns:
        Dict with the score frame, bar timestamps, per-engine return series,
        the core exposure frame, and the (possibly None) macro scale map.
    """
    # Longer pre-roll when macro scaling is on — presumably so the weekly
    # EMAs in _build_macro_scale_map have enough history (TODO confirm).
    raw_start = eval_start - pd.Timedelta(days=365 if macro_scale_spec is not None else 90)
    sliced = slice_bundle(bundle, raw_start, eval_end)
    score_frame = build_regime_score_frame(sliced, eval_start, eval_end, profile_name=profile_name)
    regime_frame = score_frame.copy()
    harness = AdverseRegimeResearchHarness(sliced, eval_end)

    # Core engine: static filter preset plus any explicit overrides.
    core_config = dict(STATIC_FILTERS[core_filter])
    core_config.update(core_config_overrides or {})
    cfg = build_strategy32_config(PROFILE_V7_DEFAULT, **core_config)
    backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
    backtester.engine_config.initial_capital = 1000.0
    core_result = backtester.run()
    core_curve = core_result.equity_curve.loc[lambda s: s.index >= eval_start]
    # Per-bar exposure rows (includes cash_pct) emitted by the backtester.
    exposure_frame = pd.DataFrame(core_result.metadata.get("exposure_rows", []))
    if not exposure_frame.empty:
        exposure_frame = exposure_frame.loc[exposure_frame["timestamp"] >= eval_start].copy()

    cap_curve = _run_adverse_component_curve(
        eval_start=eval_start,
        engine_name=cap_engine,
        harness=harness,
        regime_frame=regime_frame,
    )
    chop_curve = _run_adverse_component_curve(
        eval_start=eval_start,
        engine_name=chop_engine,
        harness=harness,
        regime_frame=regime_frame,
    )
    dist_curve = _run_adverse_component_curve(
        eval_start=eval_start,
        engine_name=dist_engine,
        harness=harness,
        regime_frame=regime_frame,
    )
    # BTC bars define the shared execution timeline.
    timestamps = sorted(sliced.prices["BTC"]["timestamp"].loc[sliced.prices["BTC"]["timestamp"] >= eval_start].tolist())
    # Macro scale is keyed on signal bars, hence timestamps[:-1].
    macro_scale_map = _build_macro_scale_map(
        sliced,
        timestamps=timestamps[:-1],
        spec=macro_scale_spec,
    )
    return {
        "score_frame": score_frame,
        "timestamps": timestamps,
        "core_returns": _curve_returns(core_curve),
        "core_exposure_frame": exposure_frame,
        "cap_returns": _curve_returns(cap_curve),
        "chop_returns": _curve_returns(chop_curve),
        "dist_returns": _curve_returns(dist_curve),
        "macro_scale_map": macro_scale_map,
    }
|
||||
|
||||
|
||||
def _build_macro_scale_map(
    bundle,
    *,
    timestamps: list[pd.Timestamp],
    spec: MacroScaleSpec | None,
) -> pd.Series | None:
    """Derive a per-bar core exposure scale in [spec.floor, 1.0] from BTC weekly trend.

    The weekly close gap and fast-EMA gap versus a slow weekly EMA are each
    ramped into [0, 1], blended by ``spec.close_weight``, then mapped onto
    the [spec.floor, 1.0] range and forward-filled onto the supplied bar
    timestamps.

    Returns:
        The aligned scale series, or None when no spec was supplied, no
        timestamps were given, or BTC price history is unavailable.
    """
    if spec is None or not timestamps:
        return None
    btc_prices = bundle.prices.get("BTC")
    if btc_prices is None or btc_prices.empty:
        return None
    closes = btc_prices.set_index("timestamp")["close"].astype(float).sort_index()
    # Collapse intraday bars to daily closes, then to Sunday-anchored weeks.
    daily = closes.resample("1D").last().dropna()
    weekly = daily.resample("W-SUN").last().dropna()
    if weekly.empty:
        return None
    fast = weekly.ewm(span=spec.fast_weeks, adjust=False).mean()
    slow = weekly.ewm(span=spec.slow_weeks, adjust=False).mean()
    frame = pd.DataFrame(
        {
            "close_gap": weekly / slow - 1.0,
            "fast_gap": fast / slow - 1.0,
        }
    )
    # Ramp each gap into [0, 1] between its start/full thresholds.
    close_scale = frame["close_gap"].apply(lambda value: _ramp(float(value), spec.close_gap_start, spec.close_gap_full))
    fast_scale = frame["fast_gap"].apply(lambda value: _ramp(float(value), spec.fast_gap_start, spec.fast_gap_full))
    blended = spec.close_weight * close_scale + (1.0 - spec.close_weight) * fast_scale
    macro_scale = spec.floor + (1.0 - spec.floor) * blended.clip(0.0, 1.0)
    # Forward-fill weekly values onto the bar grid; bars before the first
    # weekly observation default to full (1.0) exposure.
    aligned = macro_scale.reindex(pd.DatetimeIndex(timestamps, name="timestamp"), method="ffill")
    aligned = aligned.fillna(1.0).clip(spec.floor, 1.0)
    return aligned.astype(float)
|
||||
|
||||
|
||||
def compose_cash_overlay_curve(
    *,
    timestamps: list[pd.Timestamp],
    score_frame: pd.DataFrame,
    core_returns: pd.Series,
    core_exposure_frame: pd.DataFrame,
    cap_returns: pd.Series,
    chop_returns: pd.Series,
    dist_returns: pd.Series,
    candidate: CashOverlayCandidate,
    macro_scale_map: pd.Series | None = None,
) -> tuple[pd.Series, pd.DataFrame]:
    """Run the core engine at full size and deploy only its idle cash into overlays.

    Unlike the soft router, the core return is always taken in full (scaled
    by the optional macro series); the cap/chop/distribution overlays are
    funded purely from the core's per-bar cash fraction. Signals are read at
    the previous bar and realized at the current bar (no look-ahead).

    Returns:
        (equity curve starting at 1000.0, per-bar DataFrame of weights,
         scores, macro scale, and the blended return).
    """
    score_map = score_frame.set_index("timestamp")[
        ["core_score", "panic_score", "choppy_score", "distribution_score"]
    ].sort_index()
    if core_exposure_frame.empty:
        # No exposure data: assume the core is fully in cash on signal bars.
        cash_map = pd.Series(1.0, index=pd.DatetimeIndex(timestamps[:-1], name="timestamp"))
    else:
        cash_map = core_exposure_frame.set_index("timestamp")["cash_pct"].sort_index()

    equity = 1000.0
    idx = [timestamps[0]]
    vals = [equity]
    rows: list[dict[str, float | str]] = []
    for i in range(1, len(timestamps)):
        signal_ts = pd.Timestamp(timestamps[i - 1])
        execution_ts = pd.Timestamp(timestamps[i])
        score_row = score_map.loc[signal_ts] if signal_ts in score_map.index else None
        if score_row is None:
            core_score = panic_score = choppy_score = distribution_score = 0.0
        else:
            core_score = float(score_row["core_score"])
            panic_score = float(score_row["panic_score"])
            choppy_score = float(score_row["choppy_score"])
            distribution_score = float(score_row["distribution_score"])

        macro_scale = float(macro_scale_map.get(signal_ts, 1.0)) if macro_scale_map is not None else 1.0
        # Missing exposure rows fall back to the last known cash fraction.
        raw_cash_pct = float(cash_map.get(signal_ts, cash_map.iloc[-1] if not cash_map.empty else 1.0))
        # Macro de-risking of the core frees up extra deployable cash.
        cash_pct = raw_cash_pct + (1.0 - raw_cash_pct) * (1.0 - macro_scale)
        # Each overlay activates only above its threshold, rescaled to [0, 1].
        cap_signal = _clip01((panic_score - candidate.cap_threshold) / max(1.0 - candidate.cap_threshold, 1e-9))
        chop_signal = _clip01((choppy_score - candidate.chop_threshold) / max(1.0 - candidate.chop_threshold, 1e-9))
        dist_signal = _clip01((distribution_score - candidate.dist_threshold) / max(1.0 - candidate.dist_threshold, 1e-9))

        # A strongly trending core regime suppresses the mean-reversion sleeves.
        if core_score > candidate.core_block_threshold:
            chop_signal *= 0.25
            dist_signal *= 0.35

        cap_weight = cash_pct * candidate.cap_cash_weight * cap_signal
        chop_weight = cash_pct * candidate.chop_cash_weight * chop_signal
        dist_weight = cash_pct * candidate.dist_cash_weight * dist_signal
        # Overlays may never exceed the available cash fraction.
        overlay_total = cap_weight + chop_weight + dist_weight
        if overlay_total > cash_pct and overlay_total > 0:
            scale = cash_pct / overlay_total
            cap_weight *= scale
            chop_weight *= scale
            dist_weight *= scale
            overlay_total = cash_pct

        bar_ret = (
            float(core_returns.get(execution_ts, 0.0)) * macro_scale
            + cap_weight * float(cap_returns.get(execution_ts, 0.0))
            + chop_weight * float(chop_returns.get(execution_ts, 0.0))
            + dist_weight * float(dist_returns.get(execution_ts, 0.0))
        )
        # Floor at zero so a catastrophic bar cannot drive equity negative.
        equity *= max(0.0, 1.0 + bar_ret)
        idx.append(execution_ts)
        vals.append(equity)
        rows.append(
            {
                "timestamp": execution_ts,
                "raw_core_cash_pct": raw_cash_pct,
                "core_cash_pct": cash_pct,
                "macro_scale": macro_scale,
                "cap_weight": cap_weight,
                "chop_weight": chop_weight,
                "dist_weight": dist_weight,
                "overlay_total": overlay_total,
                "core_score": core_score,
                "panic_score": panic_score,
                "choppy_score": choppy_score,
                "distribution_score": distribution_score,
                "portfolio_return": bar_ret,
            }
        )
    curve = pd.Series(vals, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
    weights = pd.DataFrame(rows)
    return curve, weights
|
||||
|
||||
|
||||
def evaluate_candidate_exact(
    *,
    bundle,
    latest_bar: pd.Timestamp,
    candidate: SoftRouterCandidate,
    cache_path: str | None = None,
    max_workers: int = 6,
) -> dict[str, object]:
    """Evaluate a soft-router candidate over independent windows and calendar years.

    Each (window/year) period is backtested independently in a forked
    subprocess via ``_exact_period_worker``; results are combined into a
    JSON-friendly payload including the aggregate ``score_candidate`` score.

    NOTE(review): ``bundle`` is accepted but never used here — workers reload
    the bundle from ``cache_path``. Confirm intent before removing it.
    """
    period_specs: list[tuple[str, str, pd.Timestamp, pd.Timestamp]] = []
    for days, label in WINDOWS:
        period_specs.append(("window", label, latest_bar - pd.Timedelta(days=days), latest_bar))
    for label, start, end_exclusive in YEAR_PERIODS:
        # Clamp each year at the latest bar; subtract one second so the
        # exclusive year boundary itself is not included.
        period_specs.append(("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))))
    period_specs.append(("year", "2026_YTD", YTD_START, latest_bar))

    ctx = mp.get_context("fork")
    cache_path = cache_path or "/tmp/strategy32_fixed66_bundle.pkl"
    window_results: dict[str, dict[str, float]] = {}
    year_results: dict[str, dict[str, float]] = {}
    latest_weights: list[dict[str, object]] = []

    with ProcessPoolExecutor(max_workers=min(max_workers, len(period_specs)), mp_context=ctx) as executor:
        # Only picklable primitives cross the process boundary.
        future_map = {
            executor.submit(
                _exact_period_worker,
                cache_path,
                asdict(candidate),
                kind,
                label,
                str(start),
                str(end),
            ): (kind, label)
            for kind, label, start, end in period_specs
        }
        for future in as_completed(future_map):
            kind, label, metrics, weight_tail = future.result()
            if kind == "window":
                window_results[label] = metrics
            else:
                year_results[label] = metrics
                if label == "2026_YTD":
                    # The YTD period ends at the latest bar, so its final
                    # weight row is the current target allocation.
                    latest_weights = weight_tail

    # Score only the named windows/years; 2026_YTD is reported but unscored.
    score, negative_years, mdd_violations = score_candidate(
        {label: window_results[label] for _, label in WINDOWS},
        {k: year_results[k] for k, _, _ in YEAR_PERIODS},
    )
    return {
        "candidate": asdict(candidate),
        "name": candidate.name,
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "windows": {label: window_results[label] for _, label in WINDOWS},
        "years": year_results,
        "latest_weights": latest_weights,
        "validation": "exact_independent_periods_soft_router",
    }
|
||||
|
||||
|
||||
def evaluate_cash_overlay_exact(
    *,
    bundle,
    latest_bar: pd.Timestamp,
    candidate: CashOverlayCandidate,
    cache_path: str | None = None,
    max_workers: int = 6,
    core_config_overrides: dict[str, object] | None = None,
    macro_scale_spec: MacroScaleSpec | None = None,
) -> dict[str, object]:
    """Evaluate a cash-overlay candidate over independent windows and years.

    Cash-overlay counterpart of ``evaluate_candidate_exact``: each period is
    backtested in its own forked subprocess via
    ``_exact_cash_overlay_period_worker``, with optional core-config
    overrides and an optional macro scaling spec forwarded as plain dicts.

    NOTE(review): ``bundle`` is accepted but never used here — workers reload
    the bundle from ``cache_path``. Confirm intent before removing it.
    """
    period_specs: list[tuple[str, str, pd.Timestamp, pd.Timestamp]] = []
    for days, label in WINDOWS:
        period_specs.append(("window", label, latest_bar - pd.Timedelta(days=days), latest_bar))
    for label, start, end_exclusive in YEAR_PERIODS:
        # Clamp each year at the latest bar; one second keeps the exclusive
        # year boundary out of the period.
        period_specs.append(("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))))
    period_specs.append(("year", "2026_YTD", YTD_START, latest_bar))

    ctx = mp.get_context("fork")
    cache_path = cache_path or "/tmp/strategy32_fixed66_bundle.pkl"
    window_results: dict[str, dict[str, float]] = {}
    year_results: dict[str, dict[str, float]] = {}
    latest_weights: list[dict[str, object]] = []

    with ProcessPoolExecutor(max_workers=min(max_workers, len(period_specs)), mp_context=ctx) as executor:
        # Dataclasses are sent as dicts so only picklable primitives cross.
        future_map = {
            executor.submit(
                _exact_cash_overlay_period_worker,
                cache_path,
                asdict(candidate),
                core_config_overrides or {},
                asdict(macro_scale_spec) if macro_scale_spec is not None else None,
                kind,
                label,
                str(start),
                str(end),
            ): (kind, label)
            for kind, label, start, end in period_specs
        }
        for future in as_completed(future_map):
            kind, label, metrics, weight_tail = future.result()
            if kind == "window":
                window_results[label] = metrics
            else:
                year_results[label] = metrics
                if label == "2026_YTD":
                    # The YTD period ends at the latest bar, so its final
                    # weight row is the current target allocation.
                    latest_weights = weight_tail

    # Score only the named windows/years; 2026_YTD is reported but unscored.
    score, negative_years, mdd_violations = score_candidate(
        {label: window_results[label] for _, label in WINDOWS},
        {k: year_results[k] for k, _, _ in YEAR_PERIODS},
    )
    return {
        "candidate": asdict(candidate),
        "name": candidate.name,
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "macro_scale_spec": asdict(macro_scale_spec) if macro_scale_spec is not None else None,
        "windows": {label: window_results[label] for _, label in WINDOWS},
        "years": year_results,
        "latest_weights": latest_weights,
        "validation": "exact_independent_periods_cash_overlay",
    }
|
||||
|
||||
|
||||
def _exact_period_worker(
    cache_path: str,
    candidate_payload: dict[str, object],
    kind: str,
    label: str,
    start_text: str,
    end_text: str,
) -> tuple[str, str, dict[str, float], list[dict[str, object]]]:
    """Subprocess worker: evaluate one (kind, label) period for a soft-router candidate.

    Runs in a forked worker process, so all inputs are plain picklable
    values: the candidate arrives as a dict and the bundle is reloaded from
    the on-disk cache instead of being pickled across the boundary.

    Returns:
        (kind, label, segment metrics for the period, last weight row as a
        JSON-serializable record list).
    """
    bundle, _ = load_component_bundle(cache_path)
    candidate = SoftRouterCandidate(**candidate_payload)
    eval_start = pd.Timestamp(start_text)
    eval_end = pd.Timestamp(end_text)
    components = build_period_components(
        bundle=bundle,
        eval_start=eval_start,
        eval_end=eval_end,
        profile_name=candidate.regime_profile,
        core_filter=candidate.core_filter,
        cap_engine=candidate.cap_engine,
        chop_engine=candidate.chop_engine,
        dist_engine=candidate.dist_engine,
    )
    curve, weights = compose_soft_router_curve(candidate=candidate, **components)
    # Keep only the most recent weight row; stringify the timestamp so the
    # payload serializes cleanly.
    weight_tail = weights.tail(1).copy()
    if not weight_tail.empty and "timestamp" in weight_tail.columns:
        weight_tail["timestamp"] = weight_tail["timestamp"].astype(str)
    return kind, label, segment_metrics(curve, eval_start, eval_end), weight_tail.to_dict(orient="records")
|
||||
|
||||
|
||||
def _exact_cash_overlay_period_worker(
    cache_path: str,
    candidate_payload: dict[str, object],
    core_config_overrides_payload: dict[str, object],
    macro_scale_spec_payload: dict[str, object] | None,
    kind: str,
    label: str,
    start_text: str,
    end_text: str,
) -> tuple[str, str, dict[str, float], list[dict[str, object]]]:
    """Subprocess worker: evaluate one (kind, label) period for a cash-overlay candidate.

    Cash-overlay counterpart of ``_exact_period_worker``: rehydrates the
    candidate and optional macro spec from plain dicts, reloads the bundle
    from the on-disk cache, and composes the cash-overlay curve.

    Returns:
        (kind, label, segment metrics for the period, last weight row as a
        JSON-serializable record list).
    """
    bundle, _ = load_component_bundle(cache_path)
    candidate = CashOverlayCandidate(**candidate_payload)
    macro_scale_spec = MacroScaleSpec(**macro_scale_spec_payload) if macro_scale_spec_payload else None
    eval_start = pd.Timestamp(start_text)
    eval_end = pd.Timestamp(end_text)
    components = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=eval_start,
        eval_end=eval_end,
        profile_name=candidate.regime_profile,
        core_filter=candidate.core_filter,
        cap_engine=candidate.cap_engine,
        chop_engine=candidate.chop_engine,
        dist_engine=candidate.dist_engine,
        core_config_overrides=core_config_overrides_payload,
        macro_scale_spec=macro_scale_spec,
    )
    curve, weights = compose_cash_overlay_curve(candidate=candidate, **components)
    # Keep only the most recent weight row; stringify the timestamp so the
    # payload serializes cleanly.
    weight_tail = weights.tail(1).copy()
    if not weight_tail.empty and "timestamp" in weight_tail.columns:
        weight_tail["timestamp"] = weight_tail["timestamp"].astype(str)
    return kind, label, segment_metrics(curve, eval_start, eval_end), weight_tail.to_dict(orient="records")
|
||||
|
||||
|
||||
def build_full_period_components(
    *,
    bundle,
    latest_bar: pd.Timestamp,
    profile_name: str,
    core_filter: str,
    cap_engine: str,
    chop_engine: str,
    dist_engine: str,
) -> dict[str, object]:
    """Build soft-router components for the trailing five-year (1825-day) window.

    Thin convenience wrapper around ``build_period_components`` that fixes
    the evaluation window to the 1825 days ending at ``latest_bar``.
    """
    window_start = latest_bar - pd.Timedelta(days=1825)
    return build_period_components(
        bundle=bundle,
        eval_start=window_start,
        eval_end=latest_bar,
        profile_name=profile_name,
        core_filter=core_filter,
        cap_engine=cap_engine,
        chop_engine=chop_engine,
        dist_engine=dist_engine,
    )
|
||||
1
scripts/__init__.py
Normal file
1
scripts/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
from __future__ import annotations
|
||||
104
scripts/run_ablation_analysis.py
Normal file
104
scripts/run_ablation_analysis.py
Normal file
@@ -0,0 +1,104 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy29.backtest.window_analysis import evaluate_window_result, slice_bundle
|
||||
from strategy32.backtest.simulator import Strategy32Backtester
|
||||
from strategy32.config import PROFILE_V5_BASELINE, build_strategy32_config
|
||||
from strategy32.data import build_strategy32_market_bundle
|
||||
|
||||
|
||||
WINDOWS = [(30, "1m"), (365, "1y"), (1095, "3y"), (1825, "5y")]
|
||||
|
||||
|
||||
def build_variants() -> list[tuple[str, dict[str, bool]]]:
    """Enumerate the single-change ablation variants.

    Returns the unmodified baseline followed by one variant per toggled
    config flag, each expressed as (name, config-override dict).
    """
    single_flags = [
        ("no_sideways", "enable_sideways_engine", False),
        ("strong_kill_switch", "enable_strong_kill_switch", True),
        ("daily_trend_filter", "enable_daily_trend_filter", True),
        ("expanded_hedge", "enable_expanded_hedge", True),
        ("max_holding_exit", "enable_max_holding_exit", True),
    ]
    variants: list[tuple[str, dict[str, bool]]] = [("baseline_v5", {})]
    variants.extend((name, {flag: value}) for name, flag, value in single_flags)
    return variants
|
||||
|
||||
|
||||
def main() -> None:
    """Run the v6 single-change ablation and write results to /tmp as JSON.

    Fetches one shared market bundle, then backtests every variant from
    ``build_variants`` across each window in ``WINDOWS``, printing per-window
    metrics as it goes.
    """
    base = build_strategy32_config(PROFILE_V5_BASELINE)
    end = pd.Timestamp("2026-03-15 00:00:00", tz="UTC")
    # Fetch enough history for the longest window plus warmup and slack.
    start = end - pd.Timedelta(days=max(days for days, _ in WINDOWS) + base.warmup_days + 14)

    print("fetching bundle...")
    bundle, latest_completed_bar, accepted_symbols, rejected_symbols, quote_by_symbol = build_strategy32_market_bundle(
        symbols=base.symbols,
        auto_discover_symbols=base.auto_discover_symbols,
        quote_assets=base.quote_assets,
        excluded_base_assets=base.excluded_base_assets,
        min_quote_volume_24h=base.discovery_min_quote_volume_24h,
        start=start,
        end=end,
        timeframe=base.timeframe,
        max_staleness_days=base.max_symbol_staleness_days,
    )
    print("latest", latest_completed_bar)

    results: dict[str, dict[str, dict[str, float | int | str]]] = {}
    for name, overrides in build_variants():
        # Each variant is the baseline config with a single toggle applied.
        cfg = copy.deepcopy(base)
        for key, value in overrides.items():
            setattr(cfg, key, value)
        variant_results = {}
        print("\nVARIANT", name)
        for days, label in WINDOWS:
            eval_end = latest_completed_bar
            eval_start = eval_end - pd.Timedelta(days=days)
            # Pre-roll the slice by the warmup period so indicators are primed.
            raw_start = eval_start - pd.Timedelta(days=cfg.warmup_days)
            sliced = slice_bundle(bundle, raw_start, eval_end)
            backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
            backtester.engine_config.initial_capital = 1000.0
            result = backtester.run()
            metrics = evaluate_window_result(result, eval_start=eval_start, bars_per_day=backtester.engine_config.bars_per_day)
            metrics["engine_pnl"] = result.engine_pnl
            metrics["total_trades"] = result.total_trades
            variant_results[label] = metrics
            print(
                label,
                "ret",
                round(float(metrics["total_return"]) * 100, 2),
                "mdd",
                round(float(metrics["max_drawdown"]) * 100, 2),
                "sharpe",
                round(float(metrics["sharpe"]), 2),
                "trades",
                metrics["trade_count"],
            )
        results[name] = variant_results

    # JSON payload includes the discovery context so runs are reproducible.
    payload = {
        "strategy": "strategy32",
        "analysis": "v6_single_change_ablation",
        "initial_capital": 1000.0,
        "auto_discover_symbols": base.auto_discover_symbols,
        "latest_completed_bar": str(latest_completed_bar),
        "requested_symbols": [] if base.auto_discover_symbols else base.symbols,
        "accepted_symbols": accepted_symbols,
        "rejected_symbols": rejected_symbols,
        "quote_by_symbol": quote_by_symbol,
        "timeframe": base.timeframe,
        "results": results,
    }
    out = Path("/tmp/strategy32_v6_ablation.json")
    out.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print("\nwrote", out)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
39
scripts/run_adverse_regime_engine_search.py
Normal file
39
scripts/run_adverse_regime_engine_search.py
Normal file
@@ -0,0 +1,39 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy32.research.adverse_regime import run_adverse_regime_search
|
||||
|
||||
|
||||
def main() -> None:
    """Run the adverse-regime engine search, save JSON, and print leaderboards.

    Delegates the heavy lifting to ``run_adverse_regime_search`` and prints a
    per-regime summary line for every engine row in the payload.
    """
    payload = run_adverse_regime_search(
        cache_path="/tmp/strategy32_fixed66_bundle.pkl",
        eval_days=1825,
        initial_capital=1000.0,
    )
    out = Path("/tmp/strategy32_adverse_regime_engine_search.json")
    out.write_text(json.dumps(payload, indent=2), encoding="utf-8")

    for regime, rows in payload["by_regime"].items():
        print(regime)
        for row in rows:
            # One line per engine: return, Sharpe, drawdown, activity, churn.
            print(
                "  ",
                row["name"],
                f"ret={float(row['total_return']) * 100:.2f}%",
                f"sharpe={float(row['sharpe']):.2f}",
                f"mdd={float(row['max_drawdown']) * 100:.2f}%",
                f"active={float(row['active_bar_ratio']) * 100:.2f}%",
                f"rebalance={int(row['rebalance_count'])}",
            )
    print(f"wrote {out}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
139
scripts/run_backtest.py
Normal file
139
scripts/run_backtest.py
Normal file
@@ -0,0 +1,139 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy29.backtest.window_analysis import evaluate_window_result, slice_bundle
|
||||
from strategy32.backtest.simulator import Strategy32Backtester
|
||||
from strategy32.config import PROFILE_V5_BASELINE, PROFILE_V7_DEFAULT, build_strategy32_config
|
||||
from strategy32.data import (
|
||||
build_strategy32_market_bundle_from_specs,
|
||||
build_strategy32_price_frames_from_specs,
|
||||
resolve_strategy32_pair_specs,
|
||||
)
|
||||
|
||||
|
||||
DEFAULT_WINDOWS = [365, 1095, 1825]
|
||||
|
||||
|
||||
def _slice_price_frames(
|
||||
prices: dict[str, pd.DataFrame],
|
||||
start: pd.Timestamp,
|
||||
end: pd.Timestamp,
|
||||
) -> dict[str, pd.DataFrame]:
|
||||
sliced: dict[str, pd.DataFrame] = {}
|
||||
for symbol, df in prices.items():
|
||||
frame = df.loc[(df["timestamp"] >= start) & (df["timestamp"] <= end)].copy()
|
||||
if not frame.empty:
|
||||
sliced[symbol] = frame.reset_index(drop=True)
|
||||
return sliced
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the Strategy32 backtest runner."""
    parser = argparse.ArgumentParser(description="Run Strategy32 backtest on Binance data")
    # (flag, add_argument kwargs) pairs, registered in order.
    options: list[tuple[str, dict[str, object]]] = [
        ("--profile", {"default": PROFILE_V7_DEFAULT, "choices": [PROFILE_V5_BASELINE, PROFILE_V7_DEFAULT]}),
        ("--symbols", {"default": ""}),
        ("--windows", {"default": ",".join(str(days) for days in DEFAULT_WINDOWS)}),
        ("--warmup-days", {"type": int, "default": 90}),
        ("--timeframe", {"default": "4h"}),
        ("--out", {"default": "/tmp/strategy32_backtest_v0.json"}),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
|
||||
|
||||
|
||||
def main() -> None:
    """Run Strategy32 backtests over one or more trailing windows and dump JSON.

    Resolves the trading universe, downloads signal-timeframe and
    execution-refinement candles, runs ``Strategy32Backtester`` once per
    requested window, and writes the combined metrics payload to ``--out``.
    """
    args = parse_args()
    strategy_config = build_strategy32_config(args.profile)
    if args.symbols:
        # An explicit symbol list disables exchange-side auto discovery.
        strategy_config.symbols = [symbol.strip().upper() for symbol in args.symbols.split(",") if symbol.strip()]
        strategy_config.auto_discover_symbols = False
    strategy_config.timeframe = args.timeframe
    strategy_config.warmup_days = args.warmup_days
    windows = [int(token.strip()) for token in args.windows.split(",") if token.strip()]

    # `pd.Timestamp.utcnow()` is deprecated in pandas 2.x; `now(tz=...)`
    # already returns a tz-aware timestamp, so the localize/convert dance
    # the original did is unnecessary.
    end = pd.Timestamp.now(tz="UTC")
    # Fetch enough history for the longest window plus warmup and a safety pad.
    start = end - pd.Timedelta(days=max(windows) + strategy_config.warmup_days + 14)

    specs = resolve_strategy32_pair_specs(
        symbols=strategy_config.symbols,
        auto_discover_symbols=strategy_config.auto_discover_symbols,
        quote_assets=strategy_config.quote_assets,
        excluded_base_assets=strategy_config.excluded_base_assets,
        min_quote_volume_24h=strategy_config.discovery_min_quote_volume_24h,
    )
    bundle, latest_completed_bar, accepted_symbols, rejected_symbols, quote_by_symbol = build_strategy32_market_bundle_from_specs(
        specs=specs,
        start=start,
        end=end,
        timeframe=strategy_config.timeframe,
        max_staleness_days=strategy_config.max_symbol_staleness_days,
    )
    accepted_specs = [spec for spec in specs if spec.base_symbol in set(accepted_symbols)]
    # Finer-grained frames (e.g. 1h) used only for execution refinement.
    execution_prices, _, execution_accepted, execution_rejected, _ = build_strategy32_price_frames_from_specs(
        specs=accepted_specs,
        start=start,
        end=end,
        timeframe=strategy_config.execution_refinement_timeframe,
        max_staleness_days=strategy_config.max_symbol_staleness_days,
    )

    window_labels = {365: "1y", 1095: "3y", 1825: "5y"}
    results = {}
    for days in windows:
        label = window_labels.get(days, f"{days}d")
        eval_end = latest_completed_bar
        eval_start = eval_end - pd.Timedelta(days=days)
        # Feed the backtester pre-window history so indicators are warmed up.
        raw_start = eval_start - pd.Timedelta(days=strategy_config.warmup_days)
        sliced = slice_bundle(bundle, raw_start, eval_end)
        execution_slice = _slice_price_frames(
            execution_prices,
            # One extra day so the first signal bar has execution context.
            raw_start - pd.Timedelta(hours=24),
            eval_end,
        )
        result = Strategy32Backtester(
            strategy_config,
            sliced,
            trade_start=eval_start,
            execution_prices=execution_slice,
        ).run()
        # NOTE(review): bars_per_day=6 assumes a 4h signal timeframe even if
        # --timeframe is overridden — confirm against evaluate_window_result.
        metrics = evaluate_window_result(result, eval_start=eval_start, bars_per_day=6)
        metrics["engine_pnl"] = result.engine_pnl
        metrics["total_trades"] = result.total_trades
        metrics["rejection_summary"] = result.metadata.get("rejection_summary", {})
        results[label] = metrics

    payload = {
        "strategy": "strategy32",
        "profile": args.profile,
        "auto_discover_symbols": strategy_config.auto_discover_symbols,
        "latest_completed_bar": str(latest_completed_bar),
        "warmup_days": strategy_config.warmup_days,
        "requested_symbols": [] if strategy_config.auto_discover_symbols else strategy_config.symbols,
        "accepted_symbols": accepted_symbols,
        "rejected_symbols": rejected_symbols,
        "execution_refinement_timeframe": strategy_config.execution_refinement_timeframe,
        "execution_refinement_symbols": execution_accepted,
        "execution_refinement_rejected": execution_rejected,
        "quote_by_symbol": quote_by_symbol,
        "timeframe": strategy_config.timeframe,
        "results": results,
    }
    target = Path(args.out)
    target.parent.mkdir(parents=True, exist_ok=True)
    target.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print(json.dumps(payload, indent=2))
    print(f"Wrote {target}")
|
||||
|
||||
|
||||
# Script entry point.
if __name__ == "__main__":
    main()
|
||||
436
scripts/run_cash_overlay_search.py
Normal file
436
scripts/run_cash_overlay_search.py
Normal file
@@ -0,0 +1,436 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import itertools
|
||||
import json
|
||||
import sys
|
||||
from dataclasses import asdict
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy29.backtest.window_analysis import slice_bundle
|
||||
from strategy32.backtest.simulator import Strategy32Backtester
|
||||
from strategy32.config import PROFILE_V7_DEFAULT, build_strategy32_config
|
||||
from strategy32.research.hybrid_regime import STATIC_FILTERS
|
||||
from strategy32.research.soft_router import (
|
||||
WINDOWS,
|
||||
YEAR_PERIODS,
|
||||
YTD_START,
|
||||
CashOverlayCandidate,
|
||||
build_cash_overlay_period_components,
|
||||
compose_cash_overlay_curve,
|
||||
evaluate_cash_overlay_exact,
|
||||
load_component_bundle,
|
||||
score_candidate,
|
||||
segment_metrics,
|
||||
)
|
||||
|
||||
|
||||
# Output locations: machine-readable search payload plus a human-readable
# markdown report (Korean) on the shared drive.
OUT_JSON = Path("/tmp/strategy32_cash_overlay_search.json")
OUT_MD = Path("/Volumes/SSD/data/nextcloud/data/tara/files/📂HeadOffice/money-bot/strategy32/017_cash_overlay_탐색결과.md")
# Output of the earlier soft-router search; appended to the report when present.
SOFT_JSON = Path("/tmp/strategy32_best_soft_exact.json")

# Fixed regime profile / core filter / adverse engines used by every candidate
# in this search — only the overlay weights and thresholds vary.
PROFILE = "loose_positive"
CORE_FILTER = "overheat_tolerant"
CAP_ENGINE = "cap_btc_rebound"
CHOP_ENGINE = "chop_inverse_carry_strict"
DIST_ENGINE = "dist_inverse_carry_strict"

# Pre-computed exact metrics of the static `overheat_tolerant` core; serves as
# the baseline the overlay candidates are compared against in main().
STATIC_BASELINE = {
    "name": "overheat_tolerant",
    "windows": {
        "1y": {"total_return": 0.1477, "annualized_return": 0.1477, "max_drawdown": -0.1229},
        "2y": {"total_return": 0.2789, "annualized_return": 0.1309, "max_drawdown": -0.1812},
        "3y": {"total_return": 0.4912, "annualized_return": 0.1425, "max_drawdown": -0.1931},
        "4y": {"total_return": 0.3682, "annualized_return": 0.0815, "max_drawdown": -0.3461},
        "5y": {"total_return": 3.7625, "annualized_return": 0.3664, "max_drawdown": -0.2334},
    },
    "years": {
        "2026_YTD": {"total_return": 0.0, "max_drawdown": 0.0},
        "2025": {"total_return": 0.0426, "max_drawdown": -0.1323},
        "2024": {"total_return": 0.1951, "max_drawdown": -0.2194},
        "2023": {"total_return": 0.4670, "max_drawdown": -0.2155},
        "2022": {"total_return": 0.0147, "max_drawdown": -0.0662},
        "2021": {"total_return": 1.9152, "max_drawdown": -0.1258},
    },
}

# Pre-computed cash/exposure statistics of the static core (rendered into the
# report's "why we changed the structure" section).
EXPOSURE_SUMMARY = {
    "avg_cash_pct": 0.9379,
    "median_cash_pct": 1.0,
    "cash_gt_50_pct": 0.9469,
    "cash_gt_80_pct": 0.9068,
    "avg_momentum_pct": 0.0495,
    "avg_carry_pct": 0.0126,
}

# Grid-search axes: cash fractions handed to each adverse engine and the
# regime-score thresholds that activate / block them.
CAP_CASH_WEIGHTS = (0.20, 0.35, 0.50, 0.65)
CHOP_CASH_WEIGHTS = (0.10, 0.20, 0.30, 0.40)
DIST_CASH_WEIGHTS = (0.05, 0.10, 0.15, 0.20)
CAP_THRESHOLDS = (0.20, 0.35, 0.50)
CHOP_THRESHOLDS = (0.35, 0.50, 0.65)
DIST_THRESHOLDS = (0.35, 0.50, 0.65)
CORE_BLOCK_THRESHOLDS = (0.45, 0.60, 0.75)
|
||||
|
||||
|
||||
def _evaluate_from_curve(curve: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
    """Score one equity curve over the standard windows and calendar years.

    Returns ``(window_results, year_results, score, negative_years,
    mdd_violations)``. The 2026 YTD segment is reported but excluded from the
    candidate score.
    """
    window_results: dict[str, dict[str, float]] = {}
    for days, label in WINDOWS:
        window_results[label] = segment_metrics(curve, latest_bar - pd.Timedelta(days=days), latest_bar)

    year_results: dict[str, dict[str, float]] = {}
    for label, start, end_exclusive in YEAR_PERIODS:
        # Cap each year at the latest completed bar (ongoing year).
        capped_end = min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))
        year_results[label] = segment_metrics(curve, start, capped_end)
    year_results["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)

    scored_years = {label: metrics for label, metrics in year_results.items() if label != "2026_YTD"}
    score, negative_years, mdd_violations = score_candidate(window_results, scored_years)
    return window_results, year_results, score, negative_years, mdd_violations
|
||||
|
||||
|
||||
def _exact_static_variant(bundle, latest_bar: pd.Timestamp, filter_name: str) -> dict[str, object]:
    """Re-run the static core exactly (fresh backtests) for one filter variant."""

    def _run_segment(trade_start: pd.Timestamp, eval_end: pd.Timestamp) -> dict[str, float]:
        # 90-day pre-window so indicators are warmed up before trading starts.
        warm_start = trade_start - pd.Timedelta(days=90)
        segment = slice_bundle(bundle, warm_start, eval_end)
        cfg = build_strategy32_config(PROFILE_V7_DEFAULT, **STATIC_FILTERS[filter_name])
        backtester = Strategy32Backtester(cfg, segment, trade_start=trade_start)
        backtester.engine_config.initial_capital = 1000.0
        curve = backtester.run().equity_curve.loc[lambda s: s.index >= trade_start]
        return segment_metrics(curve, trade_start, eval_end)

    window_results: dict[str, dict[str, float]] = {
        label: _run_segment(latest_bar - pd.Timedelta(days=days), latest_bar)
        for days, label in WINDOWS
    }
    year_results: dict[str, dict[str, float]] = {
        label: _run_segment(start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
        for label, start, end_exclusive in YEAR_PERIODS
    }
    year_results["2026_YTD"] = _run_segment(YTD_START, latest_bar)

    score, negative_years, mdd_violations = score_candidate(
        window_results,
        {label: metrics for label, metrics in year_results.items() if label != "2026_YTD"},
    )
    return {
        "name": filter_name,
        "windows": window_results,
        "years": year_results,
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "validation": "exact_static_variant",
    }
|
||||
|
||||
|
||||
def _core_exposure_summary(bundle, latest_bar: pd.Timestamp) -> dict[str, float]:
    """Summarize how much of the 5y window the static core spends in cash."""
    trade_start = latest_bar - pd.Timedelta(days=1825)
    warm_start = trade_start - pd.Timedelta(days=90)
    segment = slice_bundle(bundle, warm_start, latest_bar)
    cfg = build_strategy32_config(PROFILE_V7_DEFAULT, **STATIC_FILTERS[CORE_FILTER])
    backtester = Strategy32Backtester(cfg, segment, trade_start=trade_start)
    backtester.engine_config.initial_capital = 1000.0
    result = backtester.run()
    # Drop warmup bars before computing the exposure statistics.
    exposure = pd.DataFrame(result.metadata.get("exposure_rows", []))
    exposure = exposure.loc[exposure["timestamp"] >= trade_start].copy()
    cash = exposure["cash_pct"]
    return {
        "avg_cash_pct": float(cash.mean()),
        "median_cash_pct": float(cash.median()),
        "cash_gt_50_pct": float((cash > 0.50).mean()),
        "cash_gt_80_pct": float((cash > 0.80).mean()),
        "avg_momentum_pct": float(exposure["momentum_pct"].mean()),
        "avg_carry_pct": float(exposure["carry_pct"].mean()),
    }
|
||||
|
||||
|
||||
def _metric_line(metrics: dict[str, float], *, include_ann: bool) -> str:
|
||||
sharpe = metrics.get("sharpe")
|
||||
if include_ann:
|
||||
parts = [
|
||||
f"ret `{metrics['total_return'] * 100:.2f}%`",
|
||||
f"ann `{metrics['annualized_return'] * 100:.2f}%`",
|
||||
]
|
||||
else:
|
||||
parts = [f"ret `{metrics['total_return'] * 100:.2f}%`"]
|
||||
if sharpe is not None:
|
||||
parts.append(f"sharpe `{sharpe:.2f}`")
|
||||
parts.append(f"mdd `{metrics['max_drawdown'] * 100:.2f}%`")
|
||||
return ", ".join(parts)
|
||||
|
||||
|
||||
def main() -> None:
    """Grid-search cash-overlay candidates, exact-validate the top few, and
    write both a JSON payload and a Korean markdown report.

    Pipeline: build 5y overlay components once, score every weight/threshold
    combination approximately from composed curves, exact-backtest the top 5,
    then compare the winner against the static-core baseline.
    """
    bundle, latest_bar = load_component_bundle()
    eval_start = latest_bar - pd.Timedelta(days=1825)
    # Pre-computed constants (see STATIC_BASELINE / EXPOSURE_SUMMARY above).
    static_exact = STATIC_BASELINE
    exposure_summary = EXPOSURE_SUMMARY
    print("[stage] build 5y overlay components", flush=True)
    components = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=eval_start,
        eval_end=latest_bar,
        profile_name=PROFILE,
        core_filter=CORE_FILTER,
        cap_engine=CAP_ENGINE,
        chop_engine=CHOP_ENGINE,
        dist_engine=DIST_ENGINE,
    )
    print("[stage] begin approximate candidate search", flush=True)

    # Full cartesian product of the grid axes; engines/profile are fixed.
    candidates = [
        CashOverlayCandidate(
            regime_profile=PROFILE,
            core_filter=CORE_FILTER,
            cap_engine=CAP_ENGINE,
            chop_engine=CHOP_ENGINE,
            dist_engine=DIST_ENGINE,
            cap_cash_weight=cap_cash_weight,
            chop_cash_weight=chop_cash_weight,
            dist_cash_weight=dist_cash_weight,
            cap_threshold=cap_threshold,
            chop_threshold=chop_threshold,
            dist_threshold=dist_threshold,
            core_block_threshold=core_block_threshold,
        )
        for (
            cap_cash_weight,
            chop_cash_weight,
            dist_cash_weight,
            cap_threshold,
            chop_threshold,
            dist_threshold,
            core_block_threshold,
        ) in itertools.product(
            CAP_CASH_WEIGHTS,
            CHOP_CASH_WEIGHTS,
            DIST_CASH_WEIGHTS,
            CAP_THRESHOLDS,
            CHOP_THRESHOLDS,
            DIST_THRESHOLDS,
            CORE_BLOCK_THRESHOLDS,
        )
    ]

    approx_rows: list[dict[str, object]] = []
    # Baseline figures each candidate must beat (MDD: less negative is better).
    static_1y_ann = float(static_exact["windows"]["1y"]["annualized_return"])
    static_5y_ann = float(static_exact["windows"]["5y"]["annualized_return"])
    static_5y_mdd = float(static_exact["windows"]["5y"]["max_drawdown"])

    for idx, candidate in enumerate(candidates, start=1):
        # Approximate pass: compose the overlay curve from precomputed
        # components instead of running a fresh backtest per candidate.
        curve, weights = compose_cash_overlay_curve(candidate=candidate, **components)
        window_results, year_results, score, negative_years, mdd_violations = _evaluate_from_curve(curve, latest_bar)
        beat_static_flags = {
            "1y_ann": float(window_results["1y"]["annualized_return"]) > static_1y_ann,
            "5y_ann": float(window_results["5y"]["annualized_return"]) > static_5y_ann,
            "5y_mdd": float(window_results["5y"]["max_drawdown"]) >= static_5y_mdd,
        }
        approx_rows.append(
            {
                "candidate": asdict(candidate),
                "name": candidate.name,
                "score": score,
                "negative_years": negative_years,
                "mdd_violations": mdd_violations,
                "windows": window_results,
                "years": year_results,
                "avg_weights": {
                    "cap": float(weights["cap_weight"].mean()),
                    "chop": float(weights["chop_weight"].mean()),
                    "dist": float(weights["dist_weight"].mean()),
                    "overlay_total": float(weights["overlay_total"].mean()),
                    "core_cash_pct": float(weights["core_cash_pct"].mean()),
                },
                "beat_static": beat_static_flags,
                "validation": "approx_full_curve_slice_cash_overlay",
            }
        )
        # Progress heartbeat every 500 candidates and at the end.
        if idx % 500 == 0 or idx == len(candidates):
            print(
                f"[approx {idx:04d}/{len(candidates)}] "
                f"1y={window_results['1y']['total_return'] * 100:.2f}% "
                f"5y_ann={window_results['5y']['annualized_return'] * 100:.2f}%",
                flush=True,
            )

    # Rank: beat-static flags first, then fewer bad years/violations, then score.
    approx_rows.sort(
        key=lambda row: (
            int(not row["beat_static"]["5y_ann"]),
            int(not row["beat_static"]["1y_ann"]),
            int(row["negative_years"]),
            int(row["mdd_violations"]),
            -float(row["score"]),
        )
    )

    exact_top: list[dict[str, object]] = []
    print("[stage] begin exact validation for top candidates", flush=True)
    # Exact pass: full backtests for the 5 best approximate candidates only.
    for row in approx_rows[:5]:
        candidate = CashOverlayCandidate(**row["candidate"])
        print(f"[exact-start] {candidate.name}", flush=True)
        result = evaluate_cash_overlay_exact(bundle=bundle, latest_bar=latest_bar, candidate=candidate)
        exact_top.append(result)
        print(
            f"[exact] {candidate.name} 1y={result['windows']['1y']['total_return'] * 100:.2f}% "
            f"5y_ann={result['windows']['5y']['annualized_return'] * 100:.2f}% "
            f"neg={result['negative_years']} mdd_viol={result['mdd_violations']}",
            flush=True,
        )

    # Re-rank the exact results with the same priority as the approximate sort.
    exact_top.sort(
        key=lambda row: (
            int(float(row["windows"]["5y"]["annualized_return"]) <= static_5y_ann),
            int(float(row["windows"]["1y"]["annualized_return"]) <= static_1y_ann),
            int(row["negative_years"]),
            int(row["mdd_violations"]),
            -float(row["score"]),
        )
    )
    best_exact = exact_top[0]

    # Previous soft-router result, if the earlier run left its JSON behind.
    soft_exact = json.loads(SOFT_JSON.read_text(encoding="utf-8")) if SOFT_JSON.exists() else None

    payload = {
        "analysis": "strategy32_cash_overlay_search",
        "latest_completed_bar": str(latest_bar),
        "candidate_count": len(candidates),
        "core_filter": CORE_FILTER,
        "engines": {
            "cap": CAP_ENGINE,
            "chop": CHOP_ENGINE,
            "dist": DIST_ENGINE,
        },
        "exposure_summary": exposure_summary,
        "static_exact": static_exact,
        "summary": approx_rows[:20],
        "exact_top": exact_top,
        "best_exact": best_exact,
        "best_soft_exact": soft_exact,
    }
    print("[stage] write outputs", flush=True)
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")

    # Markdown report (Korean) — header, motivation, search space, baseline.
    lines = [
        "# Strategy32 Cash Overlay 탐색결과",
        "",
        "## 1. 목적",
        "",
        "정적 core를 줄이던 기존 soft-router를 버리고, `overheat_tolerant` core가 실제로 비워두는 현금 위에만 adverse 엔진을 얹는 cash-overlay 구조를 탐색한다.",
        "",
        "## 2. 왜 구조를 바꿨는가",
        "",
        f"- core `overheat_tolerant` 5y 평균 현금 비중: `{exposure_summary['avg_cash_pct'] * 100:.2f}%`",
        f"- core 중앙값 현금 비중: `{exposure_summary['median_cash_pct'] * 100:.2f}%`",
        f"- 현금 비중 `> 50%` 바 비율: `{exposure_summary['cash_gt_50_pct'] * 100:.2f}%`",
        f"- 현금 비중 `> 80%` 바 비율: `{exposure_summary['cash_gt_80_pct'] * 100:.2f}%`",
        "",
        "즉 기존 soft-router는 이미 대부분 현금인 core를 또 줄이고 있었다. overlay는 core를 대체하는 게 아니라, core가 실제로 안 쓰는 현금에만 들어가야 맞다.",
        "",
        "## 3. 탐색 범위",
        "",
        f"- profile: `{PROFILE}`",
        f"- core filter: `{CORE_FILTER}`",
        f"- cap engine: `{CAP_ENGINE}`",
        f"- chop engine: `{CHOP_ENGINE}`",
        f"- dist engine: `{DIST_ENGINE}`",
        f"- cap cash weights: `{CAP_CASH_WEIGHTS}`",
        f"- chop cash weights: `{CHOP_CASH_WEIGHTS}`",
        f"- dist cash weights: `{DIST_CASH_WEIGHTS}`",
        f"- cap thresholds: `{CAP_THRESHOLDS}`",
        f"- chop thresholds: `{CHOP_THRESHOLDS}`",
        f"- dist thresholds: `{DIST_THRESHOLDS}`",
        f"- core block thresholds: `{CORE_BLOCK_THRESHOLDS}`",
        f"- candidate count: `{len(candidates)}`",
        "",
        "## 4. 정적 core exact 기준선",
        "",
        f"- 1y: {_metric_line(static_exact['windows']['1y'], include_ann=False)}",
        f"- 2y: {_metric_line(static_exact['windows']['2y'], include_ann=True)}",
        f"- 3y: {_metric_line(static_exact['windows']['3y'], include_ann=True)}",
        f"- 4y: {_metric_line(static_exact['windows']['4y'], include_ann=True)}",
        f"- 5y: {_metric_line(static_exact['windows']['5y'], include_ann=True)}",
        f"- 2026 YTD: {_metric_line(static_exact['years']['2026_YTD'], include_ann=False)}",
        f"- 2025: {_metric_line(static_exact['years']['2025'], include_ann=False)}",
        f"- 2024: {_metric_line(static_exact['years']['2024'], include_ann=False)}",
        f"- 2023: {_metric_line(static_exact['years']['2023'], include_ann=False)}",
        f"- 2022: {_metric_line(static_exact['years']['2022'], include_ann=False)}",
        f"- 2021: {_metric_line(static_exact['years']['2021'], include_ann=False)}",
        "",
        "## 5. cash-overlay exact 상위 후보",
        "",
    ]

    # One report section per exact-validated candidate.
    for idx, row in enumerate(exact_top, start=1):
        candidate = row["candidate"]
        lines.extend(
            [
                f"### {idx}. {row['name']}",
                "",
                f"- weights: `cap {candidate['cap_cash_weight']:.2f}`, `chop {candidate['chop_cash_weight']:.2f}`, `dist {candidate['dist_cash_weight']:.2f}`",
                f"- thresholds: `cap {candidate['cap_threshold']:.2f}`, `chop {candidate['chop_threshold']:.2f}`, `dist {candidate['dist_threshold']:.2f}`, `block {candidate['core_block_threshold']:.2f}`",
                f"- 1y: {_metric_line(row['windows']['1y'], include_ann=False)}",
                f"- 2y: {_metric_line(row['windows']['2y'], include_ann=True)}",
                f"- 3y: {_metric_line(row['windows']['3y'], include_ann=True)}",
                f"- 4y: {_metric_line(row['windows']['4y'], include_ann=True)}",
                f"- 5y: {_metric_line(row['windows']['5y'], include_ann=True)}",
                f"- 2026 YTD: {_metric_line(row['years']['2026_YTD'], include_ann=False)}",
                f"- 2025: {_metric_line(row['years']['2025'], include_ann=False)}",
                f"- 2024: {_metric_line(row['years']['2024'], include_ann=False)}",
                f"- 2023: {_metric_line(row['years']['2023'], include_ann=False)}",
                f"- 2022: {_metric_line(row['years']['2022'], include_ann=False)}",
                f"- 2021: {_metric_line(row['years']['2021'], include_ann=False)}",
                f"- score `{row['score']:.3f}`, negative years `{row['negative_years']}`, mdd violations `{row['mdd_violations']}`",
                "",
            ]
        )

    # Conclusion: best overlay vs static baseline on the key comparison points.
    lines.extend(
        [
            "## 6. 결론",
            "",
            (
                "cash-overlay가 정적 core보다 나아졌는지는 `best_exact`와 `static_exact` 비교로 판단한다. "
                "핵심 비교 포인트는 `1y`, `5y annualized`, `5y MDD`, 그리고 `2025/2024`의 음수 여부다."
            ),
            "",
            f"- best cash-overlay 1y: `{best_exact['windows']['1y']['total_return'] * 100:.2f}%` vs static `{static_exact['windows']['1y']['total_return'] * 100:.2f}%`",
            f"- best cash-overlay 5y ann: `{best_exact['windows']['5y']['annualized_return'] * 100:.2f}%` vs static `{static_exact['windows']['5y']['annualized_return'] * 100:.2f}%`",
            f"- best cash-overlay 5y MDD: `{best_exact['windows']['5y']['max_drawdown'] * 100:.2f}%` vs static `{static_exact['windows']['5y']['max_drawdown'] * 100:.2f}%`",
            "",
        ]
    )

    # Optional comparison to the previous replacement soft-router run.
    if soft_exact:
        lines.extend(
            [
                "## 7. 기존 replacement soft-router와 비교",
                "",
                f"- previous soft 1y: `{soft_exact['windows']['1y']['total_return'] * 100:.2f}%`",
                f"- previous soft 5y ann: `{soft_exact['windows']['5y']['annualized_return'] * 100:.2f}%`",
                f"- previous soft 5y MDD: `{soft_exact['windows']['5y']['max_drawdown'] * 100:.2f}%`",
                "",
            ]
        )

    OUT_MD.write_text("\n".join(lines), encoding="utf-8")
    print("[done] cash overlay search complete", flush=True)
|
||||
|
||||
|
||||
# Script entry point.
if __name__ == "__main__":
    main()
|
||||
169
scripts/run_current_relaxed_hybrid_exact.py
Normal file
169
scripts/run_current_relaxed_hybrid_exact.py
Normal file
@@ -0,0 +1,169 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import multiprocessing as mp
|
||||
import sys
|
||||
from concurrent.futures import ProcessPoolExecutor, as_completed
|
||||
from dataclasses import asdict
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy32.scripts.run_current_relaxed_hybrid_experiment import (
|
||||
BASELINE_PATH,
|
||||
BEST_CASH_OVERLAY,
|
||||
CACHE_PATH,
|
||||
CURRENT_OVERHEAT_OVERRIDES,
|
||||
OUT_JSON as SEARCH_OUT_JSON,
|
||||
RELAXED_OVERHEAT_OVERRIDES,
|
||||
WINDOWS,
|
||||
YEAR_PERIODS,
|
||||
YTD_START,
|
||||
HybridSwitchCandidate,
|
||||
_compose_hybrid_curve,
|
||||
)
|
||||
from strategy32.research.soft_router import build_cash_overlay_period_components, load_component_bundle, score_candidate, segment_metrics
|
||||
|
||||
|
||||
OUT_JSON = Path("/tmp/strategy32_current_relaxed_hybrid_exact.json")


# Fallback switch candidate, used when the search output JSON is missing or
# carries no `search_top` entries (see main()).
BEST_SEARCH_CANDIDATE = HybridSwitchCandidate(
    positive_regimes=("MOMENTUM_EXPANSION", "EUPHORIC_BREAKOUT"),
    core_score_min=0.60,
    breadth_persist_min=0.50,
    funding_persist_min=0.55,
    panic_max=0.20,
    choppy_max=0.40,
    distribution_max=0.30,
)
|
||||
|
||||
|
||||
def _baseline_summary() -> dict[str, object]:
    """Load the per-variant baseline results from the comparison JSON."""
    payload = json.loads(BASELINE_PATH.read_text(encoding="utf-8"))
    variants = payload["variants"]
    summary: dict[str, object] = {}
    for variant_name in ("current_overheat", "relaxed_overheat"):
        summary[variant_name] = variants[variant_name]["results"]
    return summary
|
||||
|
||||
|
||||
def _period_specs(latest_bar: pd.Timestamp) -> list[tuple[str, str, pd.Timestamp, pd.Timestamp]]:
    """Enumerate ``(kind, label, start, end)`` evaluation periods.

    Covers the trailing windows, the calendar years (capped at the latest
    completed bar), and the 2026 YTD segment.
    """
    window_specs = [
        ("window", label, latest_bar - pd.Timedelta(days=days), latest_bar)
        for days, label in WINDOWS
    ]
    year_specs = [
        ("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
        for label, start, end_exclusive in YEAR_PERIODS
    ]
    return [*window_specs, *year_specs, ("year", "2026_YTD", YTD_START, latest_bar)]
|
||||
|
||||
|
||||
def _period_worker(
    cache_path: str,
    candidate_payload: dict[str, object],
    kind: str,
    label: str,
    start_text: str,
    end_text: str,
) -> tuple[str, str, dict[str, float], list[dict[str, object]]]:
    """Worker process: rebuild components for one period and score the hybrid curve.

    Runs in a forked subprocess, so all arguments are plain strings/dicts and
    the bundle is reloaded from the pickle cache.
    """
    bundle, _ = load_component_bundle(cache_path)
    candidate = HybridSwitchCandidate(**candidate_payload)
    start = pd.Timestamp(start_text)
    end = pd.Timestamp(end_text)

    def _components(core_overrides):
        # Same engines/profile as the best cash-overlay candidate; only the
        # core config overrides differ between the two variants.
        return build_cash_overlay_period_components(
            bundle=bundle,
            eval_start=start,
            eval_end=end,
            profile_name=BEST_CASH_OVERLAY.regime_profile,
            core_filter=BEST_CASH_OVERLAY.core_filter,
            cap_engine=BEST_CASH_OVERLAY.cap_engine,
            chop_engine=BEST_CASH_OVERLAY.chop_engine,
            dist_engine=BEST_CASH_OVERLAY.dist_engine,
            core_config_overrides=core_overrides,
        )

    curve, rows = _compose_hybrid_curve(
        current_components=_components(CURRENT_OVERHEAT_OVERRIDES),
        relaxed_components=_components(RELAXED_OVERHEAT_OVERRIDES),
        switch_candidate=candidate,
    )
    latest_rows: list[dict[str, object]] = []
    if label == "2026_YTD":
        # Keep the final few decision rows (stringified timestamps) for inspection.
        latest_rows = rows.tail(5).assign(timestamp=lambda df: df["timestamp"].astype(str)).to_dict(orient="records")
    return kind, label, segment_metrics(curve, start, end), latest_rows
|
||||
|
||||
|
||||
def main() -> None:
    """Exact-evaluate the best hybrid switch candidate across all periods.

    Picks the top candidate from the earlier search output (falling back to
    the hard-coded BEST_SEARCH_CANDIDATE), fans the per-period evaluation out
    to forked worker processes, then scores and dumps the combined payload.
    """
    # Prefer the best candidate from the prior search run when available.
    if SEARCH_OUT_JSON.exists():
        payload = json.loads(SEARCH_OUT_JSON.read_text(encoding="utf-8"))
        if payload.get("search_top"):
            best_candidate = HybridSwitchCandidate(**payload["search_top"][0]["candidate"])
        else:
            best_candidate = BEST_SEARCH_CANDIDATE
    else:
        best_candidate = BEST_SEARCH_CANDIDATE

    _, latest_bar = load_component_bundle(CACHE_PATH)
    window_results: dict[str, dict[str, float]] = {}
    year_results: dict[str, dict[str, float]] = {}
    latest_rows: list[dict[str, object]] = []

    specs = _period_specs(latest_bar)
    # Fork start method so workers inherit the warm interpreter state cheaply.
    ctx = mp.get_context("fork")
    with ProcessPoolExecutor(max_workers=min(6, len(specs)), mp_context=ctx) as executor:
        # Only picklable primitives cross the process boundary.
        future_map = {
            executor.submit(
                _period_worker,
                CACHE_PATH,
                asdict(best_candidate),
                kind,
                label,
                str(start),
                str(end),
            ): (kind, label)
            for kind, label, start, end in specs
        }
        for future in as_completed(future_map):
            kind, label = future_map[future]
            kind_result, label_result, metrics, latest = future.result()
            if kind_result == "window":
                window_results[label_result] = metrics
            else:
                year_results[label_result] = metrics
            # Only the 2026_YTD worker returns rows (see _period_worker).
            if latest:
                latest_rows = latest
            print(f"[done] {label_result}", flush=True)

    # Score over the canonical window/year sets (YTD excluded by construction).
    score, negative_years, mdd_violations = score_candidate(
        {label: window_results[label] for _, label in WINDOWS},
        {label: year_results[label] for label, _, _ in YEAR_PERIODS},
    )
    payload = {
        "analysis": "current_relaxed_hybrid_exact",
        "latest_bar": str(latest_bar),
        "candidate": asdict(best_candidate),
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "windows": {label: window_results[label] for _, label in WINDOWS},
        "years": year_results,
        "latest_rows": latest_rows,
        "baselines": _baseline_summary(),
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print(json.dumps(payload, indent=2))
    print(f"[saved] {OUT_JSON}")
|
||||
|
||||
|
||||
# Script entry point.
if __name__ == "__main__":
    main()
|
||||
376
scripts/run_current_relaxed_hybrid_experiment.py
Normal file
376
scripts/run_current_relaxed_hybrid_experiment.py
Normal file
@@ -0,0 +1,376 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sys
|
||||
from dataclasses import asdict, dataclass
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy32.live.runtime import BEST_CASH_OVERLAY, LIVE_STRATEGY_OVERRIDES
|
||||
from strategy32.research.soft_router import (
|
||||
build_cash_overlay_period_components,
|
||||
load_component_bundle,
|
||||
score_candidate,
|
||||
segment_metrics,
|
||||
)
|
||||
|
||||
|
||||
# Pickled component bundle shared with the worker processes; baseline metrics
# from the earlier core-filter comparison; and this script's output path.
CACHE_PATH = "/tmp/strategy32_fixed66_bundle.pkl"
BASELINE_PATH = Path("/tmp/strategy32_recent_core_filter_comparison.json")
OUT_JSON = Path("/tmp/strategy32_current_relaxed_hybrid_experiment.json")

# Trailing evaluation windows as (days, label) pairs.
WINDOWS = (
    (365, "1y"),
    (730, "2y"),
    (1095, "3y"),
    (1460, "4y"),
    (1825, "5y"),
)

# Calendar-year periods as (label, start, exclusive end), all tz-aware UTC.
# NOTE(review): 2021 starts mid-March — presumably the first bar with usable
# history in the bundle; confirm against the cached data.
YEAR_PERIODS = (
    ("2021", pd.Timestamp("2021-03-16 04:00:00+00:00"), pd.Timestamp("2022-01-01 00:00:00+00:00")),
    ("2022", pd.Timestamp("2022-01-01 00:00:00+00:00"), pd.Timestamp("2023-01-01 00:00:00+00:00")),
    ("2023", pd.Timestamp("2023-01-01 00:00:00+00:00"), pd.Timestamp("2024-01-01 00:00:00+00:00")),
    ("2024", pd.Timestamp("2024-01-01 00:00:00+00:00"), pd.Timestamp("2025-01-01 00:00:00+00:00")),
    ("2025", pd.Timestamp("2025-01-01 00:00:00+00:00"), pd.Timestamp("2026-01-01 00:00:00+00:00")),
)
YTD_START = pd.Timestamp("2026-01-01 00:00:00+00:00")

# "Current" variant: live strategy overrides plus daily hard-filter settings.
CURRENT_OVERHEAT_OVERRIDES = {
    **LIVE_STRATEGY_OVERRIDES,
    "hard_filter_refresh_cadence": "1d",
    "hard_filter_min_history_bars": 120,
    "hard_filter_lookback_bars": 30,
    "hard_filter_min_avg_dollar_volume": 50_000_000.0,
}

# "Relaxed" variant: same hard-filter settings but with loosened momentum
# thresholds and a lower universe volume floor.
RELAXED_OVERHEAT_OVERRIDES = {
    **LIVE_STRATEGY_OVERRIDES,
    "momentum_min_score": 0.58,
    "momentum_min_relative_strength": -0.03,
    "momentum_min_7d_return": 0.00,
    "universe_min_avg_dollar_volume": 75_000_000.0,
    "hard_filter_refresh_cadence": "1d",
    "hard_filter_min_history_bars": 120,
    "hard_filter_lookback_bars": 30,
    "hard_filter_min_avg_dollar_volume": 50_000_000.0,
}
|
||||
|
||||
|
||||
@dataclass(frozen=True, slots=True)
|
||||
class HybridSwitchCandidate:
|
||||
positive_regimes: tuple[str, ...]
|
||||
core_score_min: float
|
||||
breadth_persist_min: float
|
||||
funding_persist_min: float
|
||||
panic_max: float
|
||||
choppy_max: float
|
||||
distribution_max: float
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
regimes = ",".join(self.positive_regimes)
|
||||
return (
|
||||
f"regimes:{regimes}"
|
||||
f"|core>={self.core_score_min:.2f}"
|
||||
f"|breadth>={self.breadth_persist_min:.2f}"
|
||||
f"|funding>={self.funding_persist_min:.2f}"
|
||||
f"|panic<={self.panic_max:.2f}"
|
||||
f"|choppy<={self.choppy_max:.2f}"
|
||||
f"|dist<={self.distribution_max:.2f}"
|
||||
)
|
||||
|
||||
|
||||
def _clip01(value: float) -> float:
|
||||
return min(max(float(value), 0.0), 1.0)
|
||||
|
||||
|
||||
def _overlay_weights(candidate, score_row: dict[str, float], core_cash_pct: float) -> tuple[float, float, float]:
|
||||
core_score = float(score_row.get("core_score", 0.0))
|
||||
panic_score = float(score_row.get("panic_score", 0.0))
|
||||
choppy_score = float(score_row.get("choppy_score", 0.0))
|
||||
distribution_score = float(score_row.get("distribution_score", 0.0))
|
||||
|
||||
cap_signal = _clip01((panic_score - candidate.cap_threshold) / max(1.0 - candidate.cap_threshold, 1e-9))
|
||||
chop_signal = _clip01((choppy_score - candidate.chop_threshold) / max(1.0 - candidate.chop_threshold, 1e-9))
|
||||
dist_signal = _clip01((distribution_score - candidate.dist_threshold) / max(1.0 - candidate.dist_threshold, 1e-9))
|
||||
if core_score > candidate.core_block_threshold:
|
||||
chop_signal *= 0.25
|
||||
dist_signal *= 0.35
|
||||
|
||||
cap_weight = float(core_cash_pct) * candidate.cap_cash_weight * cap_signal
|
||||
chop_weight = float(core_cash_pct) * candidate.chop_cash_weight * chop_signal
|
||||
dist_weight = float(core_cash_pct) * candidate.dist_cash_weight * dist_signal
|
||||
overlay_total = cap_weight + chop_weight + dist_weight
|
||||
if overlay_total > core_cash_pct and overlay_total > 0.0:
|
||||
scale = float(core_cash_pct) / overlay_total
|
||||
cap_weight *= scale
|
||||
chop_weight *= scale
|
||||
dist_weight *= scale
|
||||
return cap_weight, chop_weight, dist_weight
|
||||
|
||||
|
||||
def _pick_relaxed(score_row: dict[str, float], candidate: HybridSwitchCandidate) -> bool:
|
||||
return (
|
||||
str(score_row.get("strategic_regime")) in candidate.positive_regimes
|
||||
and float(score_row.get("core_score", 0.0)) >= candidate.core_score_min
|
||||
and float(score_row.get("breadth_persist", 0.0) or 0.0) >= candidate.breadth_persist_min
|
||||
and float(score_row.get("funding_persist", 0.0) or 0.0) >= candidate.funding_persist_min
|
||||
and float(score_row.get("panic_score", 0.0)) <= candidate.panic_max
|
||||
and float(score_row.get("choppy_score", 0.0)) <= candidate.choppy_max
|
||||
and float(score_row.get("distribution_score", 0.0)) <= candidate.distribution_max
|
||||
)
|
||||
|
||||
|
||||
def _compose_hybrid_curve(
    *,
    current_components: dict[str, object],
    relaxed_components: dict[str, object],
    switch_candidate: HybridSwitchCandidate,
) -> tuple[pd.Series, pd.DataFrame]:
    """Simulate an equity curve that switches between the two pre-built cores.

    For each bar, the previous bar's score row decides (via ``_pick_relaxed``)
    whether the relaxed or current core's returns and cash exposure apply; the
    remaining cash is allocated to overlay sleeves by ``_overlay_weights``.
    Returns the equity curve (starting at 1000.0) and a per-bar diagnostics frame.
    """
    timestamps = list(current_components["timestamps"])
    # Score rows and per-core cash exposure, indexed by signal timestamp.
    score_map = current_components["score_frame"].set_index("timestamp").sort_index()
    current_cash_map = current_components["core_exposure_frame"].set_index("timestamp")["cash_pct"].sort_index()
    relaxed_cash_map = relaxed_components["core_exposure_frame"].set_index("timestamp")["cash_pct"].sort_index()
    current_core_returns = current_components["core_returns"]
    relaxed_core_returns = relaxed_components["core_returns"]
    # Overlay sleeve returns are shared; taken from the current components.
    cap_returns = current_components["cap_returns"]
    chop_returns = current_components["chop_returns"]
    dist_returns = current_components["dist_returns"]

    equity = 1000.0
    idx = [timestamps[0]]
    vals = [equity]
    rows: list[dict[str, object]] = []
    for i in range(1, len(timestamps)):
        # Decide at bar i-1, realize returns at bar i (no lookahead).
        signal_ts = pd.Timestamp(timestamps[i - 1])
        execution_ts = pd.Timestamp(timestamps[i])
        score_row = score_map.loc[signal_ts].to_dict() if signal_ts in score_map.index else {}
        use_relaxed = _pick_relaxed(score_row, switch_candidate)
        active_name = "relaxed_overheat" if use_relaxed else "current_overheat"
        core_returns = relaxed_core_returns if use_relaxed else current_core_returns
        cash_map = relaxed_cash_map if use_relaxed else current_cash_map
        # Fallback when the signal bar is missing: last known cash pct, or fully cash.
        core_cash_pct = float(cash_map.get(signal_ts, cash_map.iloc[-1] if not cash_map.empty else 1.0))
        cap_weight, chop_weight, dist_weight = _overlay_weights(BEST_CASH_OVERLAY, score_row, core_cash_pct)
        # Missing execution timestamps contribute 0.0 via Series.get defaults.
        bar_ret = (
            float(core_returns.get(execution_ts, 0.0))
            + cap_weight * float(cap_returns.get(execution_ts, 0.0))
            + chop_weight * float(chop_returns.get(execution_ts, 0.0))
            + dist_weight * float(dist_returns.get(execution_ts, 0.0))
        )
        # Equity is floored at zero (cannot go negative).
        equity *= max(0.0, 1.0 + bar_ret)
        idx.append(execution_ts)
        vals.append(equity)
        rows.append(
            {
                "timestamp": execution_ts,
                "active_core": active_name,
                "core_cash_pct": core_cash_pct,
                "core_score": float(score_row.get("core_score", 0.0)),
                "panic_score": float(score_row.get("panic_score", 0.0)),
                "choppy_score": float(score_row.get("choppy_score", 0.0)),
                "distribution_score": float(score_row.get("distribution_score", 0.0)),
                "breadth_persist": float(score_row.get("breadth_persist", 0.0) or 0.0),
                "funding_persist": float(score_row.get("funding_persist", 0.0) or 0.0),
                "portfolio_return": bar_ret,
            }
        )
    curve = pd.Series(vals, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
    return curve, pd.DataFrame(rows)
|
||||
|
||||
|
||||
def _metrics_for_curve(curve: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
    """Compute window and calendar-year metrics for *curve* and score them.

    Returns ``(windows, years, score, negative_years, mdd_violations)`` where
    the last three come from ``score_candidate`` (2026_YTD is reported in
    ``years`` but excluded from scoring).
    """
    windows: dict[str, dict[str, float]] = {}
    for days, label in WINDOWS:
        start = latest_bar - pd.Timedelta(days=days)
        windows[label] = segment_metrics(curve, start, latest_bar)
    years: dict[str, dict[str, float]] = {}
    for label, start, end_exclusive in YEAR_PERIODS:
        # Clamp to the latest bar; subtract one second to keep the end exclusive.
        years[label] = segment_metrics(curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
    years["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)
    score, negative_years, mdd_violations = score_candidate(
        {label: windows[label] for _, label in WINDOWS},
        {label: years[label] for label, _, _ in YEAR_PERIODS},
    )
    return windows, years, score, negative_years, mdd_violations
|
||||
|
||||
|
||||
def _candidate_space() -> list[HybridSwitchCandidate]:
    """Enumerate the full hybrid-switch grid.

    Replaces the original seven-level nested loop with ``itertools.product``;
    iteration order (last axis fastest) and the resulting list are identical.
    Grid size: 2 * 3 * 3 * 3 * 2 * 2 * 2 = 432 candidates.
    """
    from itertools import product  # local import: keeps module deps unchanged

    positive_sets = (
        ("EUPHORIC_BREAKOUT",),
        ("MOMENTUM_EXPANSION", "EUPHORIC_BREAKOUT"),
    )
    grid = product(
        positive_sets,
        (0.50, 0.55, 0.60),  # core_score_min
        (0.50, 0.55, 0.60),  # breadth_persist_min
        (0.55, 0.60, 0.65),  # funding_persist_min
        (0.20, 0.30),        # panic_max
        (0.40, 0.50),        # choppy_max
        (0.30, 0.40),        # distribution_max
    )
    return [
        HybridSwitchCandidate(
            positive_regimes=regimes,
            core_score_min=core_min,
            breadth_persist_min=breadth_min,
            funding_persist_min=funding_min,
            panic_max=panic,
            choppy_max=choppy,
            distribution_max=dist,
        )
        for regimes, core_min, breadth_min, funding_min, panic, choppy, dist in grid
    ]
|
||||
|
||||
|
||||
def _baseline_summary() -> dict[str, object]:
    """Load the saved per-variant baseline results for side-by-side comparison."""
    payload = json.loads(BASELINE_PATH.read_text(encoding="utf-8"))
    variants = payload["variants"]
    return {name: variants[name]["results"] for name in ("current_overheat", "relaxed_overheat")}
|
||||
|
||||
|
||||
def _evaluate_exact_candidate(bundle, latest_bar: pd.Timestamp, switch_candidate: HybridSwitchCandidate) -> dict[str, object]:
    """Re-evaluate one switch candidate exactly, per evaluation period.

    Unlike the coarse search (which reuses one 5y component build), this
    rebuilds both cores' components for every window and year segment before
    composing the hybrid curve, then scores the per-period metrics.
    """
    windows: dict[str, dict[str, float]] = {}
    years: dict[str, dict[str, float]] = {}
    latest_rows: list[dict[str, object]] = []
    # (kind, label, start, end) for each trailing window, calendar year, and YTD.
    periods = [
        *(("window", label, latest_bar - pd.Timedelta(days=days), latest_bar) for days, label in WINDOWS),
        *(("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))) for label, start, end_exclusive in YEAR_PERIODS),
        ("year", "2026_YTD", YTD_START, latest_bar),
    ]
    for kind, label, start, end in periods:
        # Current-core components for this exact period.
        current = build_cash_overlay_period_components(
            bundle=bundle,
            eval_start=start,
            eval_end=end,
            profile_name=BEST_CASH_OVERLAY.regime_profile,
            core_filter=BEST_CASH_OVERLAY.core_filter,
            cap_engine=BEST_CASH_OVERLAY.cap_engine,
            chop_engine=BEST_CASH_OVERLAY.chop_engine,
            dist_engine=BEST_CASH_OVERLAY.dist_engine,
            core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
        )
        # Relaxed-core components; only the config overrides differ.
        relaxed = build_cash_overlay_period_components(
            bundle=bundle,
            eval_start=start,
            eval_end=end,
            profile_name=BEST_CASH_OVERLAY.regime_profile,
            core_filter=BEST_CASH_OVERLAY.core_filter,
            cap_engine=BEST_CASH_OVERLAY.cap_engine,
            chop_engine=BEST_CASH_OVERLAY.chop_engine,
            dist_engine=BEST_CASH_OVERLAY.dist_engine,
            core_config_overrides=RELAXED_OVERHEAT_OVERRIDES,
        )
        curve, rows = _compose_hybrid_curve(
            current_components=current,
            relaxed_components=relaxed,
            switch_candidate=switch_candidate,
        )
        metrics = segment_metrics(curve, start, end)
        if kind == "window":
            windows[label] = metrics
        else:
            years[label] = metrics
            if label == "2026_YTD":
                # Keep the last 3 diagnostic rows (timestamps stringified for JSON).
                latest_rows = rows.tail(3).assign(timestamp=lambda df: df["timestamp"].astype(str)).to_dict(orient="records")
    score, negative_years, mdd_violations = score_candidate(
        {label: windows[label] for _, label in WINDOWS},
        {label: years[label] for label, _, _ in YEAR_PERIODS},
    )
    return {
        "candidate": asdict(switch_candidate),
        "name": switch_candidate.name,
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "windows": windows,
        "years": years,
        "latest_rows": latest_rows,
    }
|
||||
|
||||
|
||||
def main() -> None:
    """Run the hybrid current/relaxed core experiment.

    Phases: (1) build both cores' 5y components once, (2) grid-search switch
    candidates against those shared components, (3) re-evaluate the best
    candidate exactly per period, (4) write the JSON report to OUT_JSON.
    """
    bundle, latest_bar = load_component_bundle(CACHE_PATH)
    eval_start = latest_bar - pd.Timedelta(days=1825)  # 5y search horizon
    print("[phase] build current components", flush=True)
    current_components = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=eval_start,
        eval_end=latest_bar,
        profile_name=BEST_CASH_OVERLAY.regime_profile,
        core_filter=BEST_CASH_OVERLAY.core_filter,
        cap_engine=BEST_CASH_OVERLAY.cap_engine,
        chop_engine=BEST_CASH_OVERLAY.chop_engine,
        dist_engine=BEST_CASH_OVERLAY.dist_engine,
        core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
    )
    print("[phase] build relaxed components", flush=True)
    relaxed_components = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=eval_start,
        eval_end=latest_bar,
        profile_name=BEST_CASH_OVERLAY.regime_profile,
        core_filter=BEST_CASH_OVERLAY.core_filter,
        cap_engine=BEST_CASH_OVERLAY.cap_engine,
        chop_engine=BEST_CASH_OVERLAY.chop_engine,
        dist_engine=BEST_CASH_OVERLAY.dist_engine,
        core_config_overrides=RELAXED_OVERHEAT_OVERRIDES,
    )

    search_rows: list[dict[str, object]] = []
    candidates = _candidate_space()
    print("[phase] search switch candidates", flush=True)
    for idx, candidate in enumerate(candidates, start=1):
        curve, rows = _compose_hybrid_curve(
            current_components=current_components,
            relaxed_components=relaxed_components,
            switch_candidate=candidate,
        )
        windows, years, score, negative_years, mdd_violations = _metrics_for_curve(curve, latest_bar)
        # Fraction of bars routed to the relaxed core.
        relaxed_share = float((rows["active_core"] == "relaxed_overheat").mean()) if not rows.empty else 0.0
        search_rows.append(
            {
                "candidate": asdict(candidate),
                "name": candidate.name,
                "score": score,
                "negative_years": negative_years,
                "mdd_violations": mdd_violations,
                "relaxed_share": relaxed_share,
                "windows": windows,
                "years": years,
            }
        )
        if idx % 36 == 0 or idx == len(candidates):  # progress every 36 candidates
            print(f"[search] {idx}/{len(candidates)}", flush=True)

    search_rows.sort(key=lambda row: float(row["score"]), reverse=True)
    best_search = search_rows[0]
    print(f"[phase] exact best {best_search['name']}", flush=True)
    best_exact = _evaluate_exact_candidate(
        bundle,
        latest_bar,
        HybridSwitchCandidate(**best_search["candidate"]),
    )

    payload = {
        "analysis": "current_relaxed_hybrid_experiment",
        "latest_bar": str(latest_bar),
        "candidate": asdict(BEST_CASH_OVERLAY),
        "baselines": _baseline_summary(),
        "search_top": search_rows[:5],
        "best_exact": best_exact,
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print(json.dumps(payload, indent=2))
    print(f"[saved] {OUT_JSON}")
|
||||
|
||||
|
||||
# Script entry point; importing the module performs no work.
if __name__ == "__main__":
    main()
|
||||
164
scripts/run_exhaustive_combo_analysis.py
Normal file
164
scripts/run_exhaustive_combo_analysis.py
Normal file
@@ -0,0 +1,164 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
import itertools
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy29.backtest.window_analysis import evaluate_window_result, slice_bundle
|
||||
from strategy32.backtest.simulator import Strategy32Backtester
|
||||
from strategy32.config import PROFILE_V5_BASELINE, build_strategy32_config
|
||||
from strategy32.data import build_strategy32_market_bundle
|
||||
|
||||
|
||||
# Backtest windows as (days, label); "1m" adds a recency check next to 1/3/5y.
WINDOWS = [(30, "1m"), (365, "1y"), (1095, "3y"), (1825, "5y")]
# Feature toggles combined exhaustively: (label, config attribute, value to set).
# Note "no_sideways" *disables* its engine; the others enable theirs.
FEATURES: list[tuple[str, str, bool]] = [
    ("no_sideways", "enable_sideways_engine", False),
    ("strong_kill_switch", "enable_strong_kill_switch", True),
    ("daily_trend_filter", "enable_daily_trend_filter", True),
    ("expanded_hedge", "enable_expanded_hedge", True),
    ("max_holding_exit", "enable_max_holding_exit", True),
]
|
||||
|
||||
|
||||
def variant_name(enabled: list[str]) -> str:
    """Label a variant by its enabled features; empty list is the v5 baseline."""
    if not enabled:
        return "baseline_v5"
    return "+".join(enabled)
|
||||
|
||||
|
||||
def balanced_score(results: dict[str, dict[str, float | int | str]]) -> float:
    """Blend risk-adjusted returns across horizons into one ranking number.

    Annualized return per unit of drawdown (drawdown floored at 1%) over
    1y/3y/5y, with the 5y window weighted up, plus a small 1m momentum term.
    """
    horizon_weights = (("1y", 1.0), ("3y", 1.0), ("5y", 1.2))
    risk_adjusted = sum(
        weight * (float(results[label]["annualized_return"]) / max(abs(float(results[label]["max_drawdown"])), 0.01))
        for label, weight in horizon_weights
    )
    return risk_adjusted + 0.25 * float(results["1m"]["total_return"])
|
||||
|
||||
|
||||
def build_variants() -> list[tuple[str, dict[str, bool], list[str]]]:
    """Enumerate the baseline plus every non-empty combination of FEATURES.

    Returns ``(name, config-overrides, enabled-feature-labels)`` triples in
    combination order — 2**len(FEATURES) entries in total.

    Fixes: drops the unused ``feature_names`` local and iterates FEATURES
    directly instead of going through indices.
    """
    variants: list[tuple[str, dict[str, bool], list[str]]] = [("baseline_v5", {}, [])]
    for r in range(1, len(FEATURES) + 1):
        for combo in itertools.combinations(FEATURES, r):
            overrides = {attr: value for _, attr, value in combo}
            enabled = [label for label, _, _ in combo]
            variants.append((variant_name(enabled), overrides, enabled))
    return variants
|
||||
|
||||
|
||||
def main() -> None:
    """Exhaustively backtest every FEATURES combination over all WINDOWS.

    Fetches one market bundle wide enough for the longest window plus warmup,
    runs each variant, ranks by ``balanced_score``, and writes the full report
    to /tmp/strategy32_v6_exhaustive_combos.json.
    """
    base = build_strategy32_config(PROFILE_V5_BASELINE)
    end = pd.Timestamp("2026-03-15 00:00:00", tz="UTC")
    # Longest window + warmup + 14-day safety margin.
    start = end - pd.Timedelta(days=max(days for days, _ in WINDOWS) + base.warmup_days + 14)

    print("fetching bundle...")
    bundle, latest_completed_bar, accepted_symbols, rejected_symbols, quote_by_symbol = build_strategy32_market_bundle(
        symbols=base.symbols,
        auto_discover_symbols=base.auto_discover_symbols,
        quote_assets=base.quote_assets,
        excluded_base_assets=base.excluded_base_assets,
        min_quote_volume_24h=base.discovery_min_quote_volume_24h,
        start=start,
        end=end,
        timeframe=base.timeframe,
        max_staleness_days=base.max_symbol_staleness_days,
    )
    print("latest", latest_completed_bar)

    results: dict[str, dict[str, dict[str, float | int | str]]] = {}
    summary_rows: list[dict[str, float | int | str | list[str]]] = []

    for idx, (name, overrides, enabled) in enumerate(build_variants(), start=1):
        # Fresh config per variant so overrides never leak between runs.
        cfg = copy.deepcopy(base)
        for attr, value in overrides.items():
            setattr(cfg, attr, value)
        variant_results = {}
        print(f"\n[{idx:02d}/32] {name}")  # 32 = 2**5 variants
        for days, label in WINDOWS:
            eval_end = latest_completed_bar
            eval_start = eval_end - pd.Timedelta(days=days)
            # Include warmup history before the scored evaluation window.
            raw_start = eval_start - pd.Timedelta(days=cfg.warmup_days)
            sliced = slice_bundle(bundle, raw_start, eval_end)
            backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
            backtester.engine_config.initial_capital = 1000.0
            result = backtester.run()
            metrics = evaluate_window_result(result, eval_start=eval_start, bars_per_day=backtester.engine_config.bars_per_day)
            metrics["engine_pnl"] = result.engine_pnl
            metrics["total_trades"] = result.total_trades
            variant_results[label] = metrics
            print(
                label,
                "ret",
                round(float(metrics["total_return"]) * 100, 2),
                "mdd",
                round(float(metrics["max_drawdown"]) * 100, 2),
                "sharpe",
                round(float(metrics["sharpe"]), 2),
                "trades",
                metrics["trade_count"],
            )
        score = balanced_score(variant_results)
        results[name] = variant_results
        summary_rows.append(
            {
                "name": name,
                "enabled": enabled,
                "balanced_score": score,
                "ret_1m": float(variant_results["1m"]["total_return"]),
                "ret_1y": float(variant_results["1y"]["total_return"]),
                "ret_3y": float(variant_results["3y"]["total_return"]),
                "ret_5y": float(variant_results["5y"]["total_return"]),
                "mdd_1y": float(variant_results["1y"]["max_drawdown"]),
                "mdd_3y": float(variant_results["3y"]["max_drawdown"]),
                "mdd_5y": float(variant_results["5y"]["max_drawdown"]),
            }
        )

    summary_rows.sort(key=lambda row: float(row["balanced_score"]), reverse=True)
    payload = {
        "strategy": "strategy32",
        "analysis": "v6_exhaustive_combo",
        "initial_capital": 1000.0,
        "auto_discover_symbols": base.auto_discover_symbols,
        "latest_completed_bar": str(latest_completed_bar),
        "requested_symbols": [] if base.auto_discover_symbols else base.symbols,
        "accepted_symbols": accepted_symbols,
        "rejected_symbols": rejected_symbols,
        "quote_by_symbol": quote_by_symbol,
        "timeframe": base.timeframe,
        "results": results,
        "summary": summary_rows,
    }
    out = Path("/tmp/strategy32_v6_exhaustive_combos.json")
    out.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print("\nTop 10 by balanced score")
    for row in summary_rows[:10]:
        print(
            row["name"],
            "score",
            round(float(row["balanced_score"]), 3),
            "1y",
            round(float(row["ret_1y"]) * 100, 2),
            "3y",
            round(float(row["ret_3y"]) * 100, 2),
            "5y",
            round(float(row["ret_5y"]) * 100, 2),
            "mdd5y",
            round(float(row["mdd_5y"]) * 100, 2),
        )
    print("\nwrote", out)
|
||||
|
||||
|
||||
# Script entry point; importing the module performs no work.
if __name__ == "__main__":
    main()
|
||||
283
scripts/run_filter_search.py
Normal file
283
scripts/run_filter_search.py
Normal file
@@ -0,0 +1,283 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
import itertools
|
||||
import json
|
||||
import multiprocessing as mp
|
||||
import sys
|
||||
from concurrent.futures import ProcessPoolExecutor, as_completed
|
||||
from dataclasses import asdict, dataclass
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy29.backtest.window_analysis import evaluate_window_result, slice_bundle
|
||||
from strategy29.common.models import MarketDataBundle
|
||||
from strategy32.backtest.simulator import Strategy32Backtester
|
||||
from strategy32.config import PROFILE_V7_DEFAULT, Strategy32Config, build_strategy32_config
|
||||
from strategy32.data import build_strategy32_market_bundle
|
||||
|
||||
|
||||
# The coarse/fine stages score 1y+3y only; the final stage adds the 5y window.
COARSE_WINDOWS = [(365, "1y"), (1095, "3y")]
FINAL_WINDOWS = [(365, "1y"), (1095, "3y"), (1825, "5y")]
# Coarse grid axes (liquidity floor in USD; momentum gates as fractions).
COARSE_LIQUIDITY_FLOORS = [5_000_000.0, 10_000_000.0, 20_000_000.0, 50_000_000.0]
COARSE_MOMENTUM_SCORES = [0.55, 0.60, 0.65]
COARSE_RELATIVE_STRENGTH = [0.00, 0.02]
COARSE_7D_RETURNS = [0.00, 0.02]
# Fine-stage axes applied on top of the best coarse rows.
FINE_CORRELATION_CAPS = [0.70, 0.78]
FINE_CARRY_MIN_EDGE = [0.0, 0.002]
# Stage fan-in: how many top-ranked rows advance to the next stage.
TOP_COARSE_FOR_FINE = 8
TOP_FINE_FOR_FINAL = 5
|
||||
|
||||
|
||||
@dataclass(slots=True)
class FilterVariant:
    """One candidate filter configuration in the search grid.

    Fields map one-to-one onto Strategy32Config attributes (see
    ``_evaluate_variant``); ``liquidity_floor`` feeds both the discovery
    volume floor and the universe average-dollar-volume floor.
    """

    name: str  # human-readable grid label, e.g. "liq20m_s0.60_rs0.00_r70.02"
    liquidity_floor: float
    momentum_min_score: float
    momentum_min_relative_strength: float
    momentum_min_7d_return: float
    max_pairwise_correlation: float
    carry_min_expected_edge: float
|
||||
|
||||
|
||||
# Shared per-process state: set in main() before spawning workers; the "fork"
# start method lets ProcessPoolExecutor children inherit these without pickling.
GLOBAL_BUNDLE: MarketDataBundle | None = None
GLOBAL_LATEST_BAR: pd.Timestamp | None = None
|
||||
|
||||
|
||||
def _subset_bundle(bundle: MarketDataBundle, symbols: set[str]) -> MarketDataBundle:
    """Build a new bundle restricted to *symbols* (frames are shared, not copied)."""
    prices = {sym: frame for sym, frame in bundle.prices.items() if sym in symbols}
    funding = {sym: frame for sym, frame in bundle.funding.items() if sym in symbols}
    return MarketDataBundle(prices=prices, funding=funding)
|
||||
|
||||
|
||||
def _score_results(results: dict[str, dict[str, float | int | str]], include_5y: bool) -> float:
|
||||
score = 0.0
|
||||
ret_1y = float(results["1y"]["total_return"])
|
||||
ann_1y = float(results["1y"]["annualized_return"])
|
||||
mdd_1y = abs(float(results["1y"]["max_drawdown"]))
|
||||
ret_3y = float(results["3y"]["total_return"])
|
||||
ann_3y = float(results["3y"]["annualized_return"])
|
||||
mdd_3y = abs(float(results["3y"]["max_drawdown"]))
|
||||
score += 1.8 * (ann_1y / max(mdd_1y, 0.01))
|
||||
score += 1.0 * (ann_3y / max(mdd_3y, 0.01))
|
||||
score += 0.25 * ret_1y + 0.15 * ret_3y
|
||||
if ret_1y <= 0:
|
||||
score -= 6.0
|
||||
if float(results["1y"]["max_drawdown"]) < -0.25:
|
||||
score -= 1.0
|
||||
if include_5y:
|
||||
ret_5y = float(results["5y"]["total_return"])
|
||||
ann_5y = float(results["5y"]["annualized_return"])
|
||||
mdd_5y = abs(float(results["5y"]["max_drawdown"]))
|
||||
score += 0.8 * (ann_5y / max(mdd_5y, 0.01))
|
||||
score += 0.10 * ret_5y
|
||||
if float(results["5y"]["max_drawdown"]) < -0.30:
|
||||
score -= 1.0
|
||||
return score
|
||||
|
||||
|
||||
def _evaluate_variant(variant: FilterVariant, windows: list[tuple[int, str]]) -> dict[str, object]:
    """Backtest one filter variant over *windows* using the process-global bundle.

    Runs inside pool workers: GLOBAL_BUNDLE/GLOBAL_LATEST_BAR must already be
    set in the parent (inherited via fork).  Returns the variant as a dict,
    its score, and per-window metrics.
    """
    if GLOBAL_BUNDLE is None or GLOBAL_LATEST_BAR is None:
        raise RuntimeError("global bundle not initialized")
    # Fresh config per variant; map each grid field onto its config attribute.
    cfg: Strategy32Config = build_strategy32_config(PROFILE_V7_DEFAULT)
    cfg.discovery_min_quote_volume_24h = variant.liquidity_floor
    cfg.universe_min_avg_dollar_volume = variant.liquidity_floor
    cfg.momentum_min_score = variant.momentum_min_score
    cfg.momentum_min_relative_strength = variant.momentum_min_relative_strength
    cfg.momentum_min_7d_return = variant.momentum_min_7d_return
    cfg.max_pairwise_correlation = variant.max_pairwise_correlation
    cfg.carry_min_expected_edge = variant.carry_min_expected_edge

    bundle = GLOBAL_BUNDLE
    results: dict[str, dict[str, float | int | str]] = {}
    for days, label in windows:
        eval_end = GLOBAL_LATEST_BAR
        eval_start = eval_end - pd.Timedelta(days=days)
        # Warmup history precedes the scored evaluation window.
        raw_start = eval_start - pd.Timedelta(days=cfg.warmup_days)
        sliced = slice_bundle(bundle, raw_start, eval_end)
        backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
        backtester.engine_config.initial_capital = 1000.0
        result = backtester.run()
        metrics = evaluate_window_result(result, eval_start=eval_start, bars_per_day=backtester.engine_config.bars_per_day)
        metrics["engine_pnl"] = result.engine_pnl
        metrics["total_trades"] = result.total_trades
        metrics["universe_size"] = len(bundle.prices)
        results[label] = metrics

    # 5y terms only contribute when a 5y window was actually evaluated.
    score = _score_results(results, include_5y=any(label == "5y" for _, label in windows))
    return {
        "variant": asdict(variant),
        "score": score,
        "results": results,
    }
|
||||
|
||||
|
||||
def _run_parallel(variants: list[FilterVariant], windows: list[tuple[int, str]], workers: int = 6) -> list[dict[str, object]]:
    """Evaluate *variants* across a fork-based process pool; return rows sorted by score.

    Requires the "fork" start method so workers inherit GLOBAL_BUNDLE /
    GLOBAL_LATEST_BAR (POSIX-only).  Prints one progress line per completed
    variant in completion order; the returned list is score-descending.
    """
    ctx = mp.get_context("fork")
    rows: list[dict[str, object]] = []
    with ProcessPoolExecutor(max_workers=workers, mp_context=ctx) as executor:
        future_map = {executor.submit(_evaluate_variant, variant, windows): variant for variant in variants}
        total = len(future_map)
        done = 0
        for future in as_completed(future_map):
            row = future.result()  # re-raises any worker exception
            rows.append(row)
            done += 1
            variant = row["variant"]
            results = row["results"]
            print(
                f"[{done:02d}/{total}] {variant['name']} score={row['score']:.3f} "
                f"1y={float(results['1y']['total_return'])*100:.2f}% "
                f"3y={float(results['3y']['total_return'])*100:.2f}%",
                flush=True,
            )
    rows.sort(key=lambda row: float(row["score"]), reverse=True)
    return rows
|
||||
|
||||
|
||||
def _build_coarse_variants() -> list[FilterVariant]:
    """Grid over liquidity/momentum axes with correlation cap and carry edge fixed."""
    axes = itertools.product(
        COARSE_LIQUIDITY_FLOORS,
        COARSE_MOMENTUM_SCORES,
        COARSE_RELATIVE_STRENGTH,
        COARSE_7D_RETURNS,
    )
    return [
        FilterVariant(
            name=f"liq{int(floor/1_000_000)}m_s{score:.2f}_rs{rs:.2f}_r7{ret7:.2f}",
            liquidity_floor=floor,
            momentum_min_score=score,
            momentum_min_relative_strength=rs,
            momentum_min_7d_return=ret7,
            max_pairwise_correlation=0.78,
            carry_min_expected_edge=0.0,
        )
        for floor, score, rs, ret7 in axes
    ]
|
||||
|
||||
|
||||
def _build_fine_variants(top_rows: list[dict[str, object]]) -> list[FilterVariant]:
    """Expand the best coarse rows with correlation-cap / carry-edge variations.

    *top_rows* are the score-sorted rows from the coarse stage; only the first
    TOP_COARSE_FOR_FINE are used.  Parameter tuples are deduplicated so
    overlapping coarse winners do not yield identical fine variants.
    """
    variants: list[FilterVariant] = []
    seen: set[tuple] = set()
    for row in top_rows[:TOP_COARSE_FOR_FINE]:
        base = row["variant"]  # dict produced by asdict(FilterVariant)
        for corr_cap, carry_edge in itertools.product(FINE_CORRELATION_CAPS, FINE_CARRY_MIN_EDGE):
            key = (
                base["liquidity_floor"],
                base["momentum_min_score"],
                base["momentum_min_relative_strength"],
                base["momentum_min_7d_return"],
                corr_cap,
                carry_edge,
            )
            if key in seen:
                continue
            seen.add(key)
            variants.append(
                FilterVariant(
                    name=(
                        f"liq{int(base['liquidity_floor']/1_000_000)}m_"
                        f"s{base['momentum_min_score']:.2f}_"
                        f"rs{base['momentum_min_relative_strength']:.2f}_"
                        f"r7{base['momentum_min_7d_return']:.2f}_"
                        f"corr{corr_cap:.2f}_carry{carry_edge:.3f}"
                    ),
                    liquidity_floor=float(base["liquidity_floor"]),
                    momentum_min_score=float(base["momentum_min_score"]),
                    momentum_min_relative_strength=float(base["momentum_min_relative_strength"]),
                    momentum_min_7d_return=float(base["momentum_min_7d_return"]),
                    max_pairwise_correlation=float(corr_cap),
                    carry_min_expected_edge=float(carry_edge),
                )
            )
    return variants
|
||||
|
||||
|
||||
def main() -> None:
    """Three-stage filter search: coarse grid -> fine grid -> 5y final validation.

    The 3y bundle (fetched at the loosest liquidity floor) backs the coarse
    and fine stages; a wider 5y bundle is fetched for the final ranking.
    Results are written to /tmp/strategy32_filter_search.json.
    """
    global GLOBAL_BUNDLE, GLOBAL_LATEST_BAR

    base = build_strategy32_config(PROFILE_V7_DEFAULT)
    max_days = max(days for days, _ in FINAL_WINDOWS)
    end = pd.Timestamp("2026-03-15 00:00:00", tz="UTC")
    # Longest window + warmup + 14-day safety margin, per stage.
    start_5y = end - pd.Timedelta(days=max_days + base.warmup_days + 14)
    start_3y = end - pd.Timedelta(days=max(days for days, _ in COARSE_WINDOWS) + base.warmup_days + 14)

    # Fetch at the loosest floor so every variant can filter down from it.
    lowest_floor = min(COARSE_LIQUIDITY_FLOORS)
    print("fetching 3y bundle for coarse search...")
    GLOBAL_BUNDLE, GLOBAL_LATEST_BAR, accepted_symbols_3y, rejected_symbols_3y, quote_by_symbol_3y = build_strategy32_market_bundle(
        symbols=base.symbols,
        auto_discover_symbols=True,
        quote_assets=base.quote_assets,
        excluded_base_assets=base.excluded_base_assets,
        min_quote_volume_24h=lowest_floor,
        start=start_3y,
        end=end,
        timeframe=base.timeframe,
        max_staleness_days=base.max_symbol_staleness_days,
    )

    coarse_rows = _run_parallel(_build_coarse_variants(), COARSE_WINDOWS)
    fine_rows = _run_parallel(_build_fine_variants(coarse_rows), COARSE_WINDOWS)

    # Swap in the longer bundle; workers forked after this point inherit it.
    print("fetching 5y bundle for final validation...")
    GLOBAL_BUNDLE, GLOBAL_LATEST_BAR, accepted_symbols_5y, rejected_symbols_5y, quote_by_symbol_5y = build_strategy32_market_bundle(
        symbols=base.symbols,
        auto_discover_symbols=True,
        quote_assets=base.quote_assets,
        excluded_base_assets=base.excluded_base_assets,
        min_quote_volume_24h=lowest_floor,
        start=start_5y,
        end=end,
        timeframe=base.timeframe,
        max_staleness_days=base.max_symbol_staleness_days,
    )
    final_variants = [FilterVariant(**row["variant"]) for row in fine_rows[:TOP_FINE_FOR_FINAL]]
    final_rows = _run_parallel(final_variants, FINAL_WINDOWS)

    payload = {
        "strategy": "strategy32",
        "analysis": "wide_universe_filter_search",
        "profile": PROFILE_V7_DEFAULT,
        "initial_capital": 1000.0,
        "latest_completed_bar": str(GLOBAL_LATEST_BAR),
        "accepted_symbols_3y": accepted_symbols_3y,
        "rejected_symbols_3y": rejected_symbols_3y,
        "quote_by_symbol_3y": quote_by_symbol_3y,
        "accepted_symbols_5y": accepted_symbols_5y,
        "rejected_symbols_5y": rejected_symbols_5y,
        "quote_by_symbol_5y": quote_by_symbol_5y,
        "coarse_top10": coarse_rows[:10],
        "fine_top10": fine_rows[:10],
        "final_ranked": final_rows,
    }
    out = Path("/tmp/strategy32_filter_search.json")
    out.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print("\nTop final variants")
    for row in final_rows:
        metrics = row["results"]
        print(
            row["variant"]["name"],
            "score",
            round(float(row["score"]), 3),
            "1y",
            round(float(metrics["1y"]["total_return"]) * 100, 2),
            "3y",
            round(float(metrics["3y"]["total_return"]) * 100, 2),
            "5y",
            round(float(metrics["5y"]["total_return"]) * 100, 2),
            "mdd5y",
            round(float(metrics["5y"]["max_drawdown"]) * 100, 2),
        )
    print("\nwrote", out)
|
||||
|
||||
|
||||
# Script entry point; importing the module performs no work.
if __name__ == "__main__":
    main()
|
||||
447
scripts/run_filter_search_extended.py
Normal file
447
scripts/run_filter_search_extended.py
Normal file
@@ -0,0 +1,447 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
import itertools
|
||||
import json
|
||||
import multiprocessing as mp
|
||||
import random
|
||||
import sys
|
||||
from concurrent.futures import ProcessPoolExecutor, as_completed
|
||||
from dataclasses import asdict, dataclass
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy29.backtest.window_analysis import evaluate_window_result, slice_bundle
|
||||
from strategy29.common.models import MarketDataBundle
|
||||
from strategy32.backtest.simulator import Strategy32Backtester
|
||||
from strategy32.config import PROFILE_V7_DEFAULT, Strategy32Config, build_strategy32_config
|
||||
from strategy32.data import build_strategy32_market_bundle
|
||||
|
||||
|
||||
# Exact validation windows: (trailing days, label).
FINAL_WINDOWS = [(365, "1y"), (1095, "3y"), (1825, "5y")]
# Seed for the reproducible random stages of the search.
RANDOM_SEED = 32
# Number of grid points drawn for the broad random stage.
RANDOM_SAMPLE_SIZE = 96
# How many broad winners seed the local-neighborhood refinement.
TOP_BROAD_FOR_LOCAL = 10
# Cap on the number of local-refinement variants actually evaluated.
LOCAL_SAMPLE_SIZE = 192
# How many merged winners get the exact 1y/3y/5y validation.
TOP_FINAL_FOR_EXACT = 8

# Grid axes for the filter search.
LIQUIDITY_FLOORS = [20_000_000.0, 30_000_000.0, 40_000_000.0, 50_000_000.0, 60_000_000.0, 75_000_000.0, 100_000_000.0]
# Avg-dollar-volume floor expressed as a multiple of the liquidity floor.
AVG_DOLLAR_VOLUME_MULTIPLIERS = [0.50, 0.75, 1.00]
MOMENTUM_MIN_SCORES = [0.50, 0.55, 0.60, 0.65, 0.70]
MOMENTUM_MIN_RS = [-0.02, 0.00, 0.02]
MOMENTUM_MIN_7D = [-0.02, 0.00, 0.02]
CORRELATION_CAPS = [0.65, 0.70, 0.74, 0.78, 0.82]
CARRY_MIN_EDGES = [0.0, 0.001, 0.002]
|
||||
|
||||
|
||||
@dataclass(slots=True)
class ExtendedFilterVariant:
    """One point in the filter-parameter grid explored by the search.

    Each field (other than ``name``) is copied onto the Strategy32Config
    built in ``_evaluate_variant`` / ``_evaluate_exact_windows``.
    """

    name: str  # unique label encoding all parameter values (see _all_combos)
    liquidity_floor: float  # -> cfg.discovery_min_quote_volume_24h
    avg_dollar_volume_floor: float  # -> cfg.universe_min_avg_dollar_volume
    momentum_min_score: float  # -> cfg.momentum_min_score
    momentum_min_relative_strength: float  # -> cfg.momentum_min_relative_strength
    momentum_min_7d_return: float  # -> cfg.momentum_min_7d_return
    max_pairwise_correlation: float  # -> cfg.max_pairwise_correlation
    carry_min_expected_edge: float  # -> cfg.carry_min_expected_edge
|
||||
|
||||
|
||||
# Shared state populated once in main() and inherited by fork()-ed workers.
GLOBAL_BUNDLE: MarketDataBundle | None = None
GLOBAL_LATEST_BAR: pd.Timestamp | None = None
|
||||
|
||||
|
||||
def _subset_bundle(bundle: MarketDataBundle, symbols: set[str]) -> MarketDataBundle:
    """Return a copy of *bundle* restricted to the requested symbols."""
    kept_prices = {name: frame for name, frame in bundle.prices.items() if name in symbols}
    kept_funding = {name: frame for name, frame in bundle.funding.items() if name in symbols}
    return MarketDataBundle(prices=kept_prices, funding=kept_funding)
|
||||
|
||||
|
||||
def _window_return(curve: pd.Series, start: pd.Timestamp, end: pd.Timestamp) -> float | None:
|
||||
window = curve.loc[(curve.index >= start) & (curve.index <= end)]
|
||||
if len(window) < 2:
|
||||
return None
|
||||
start_equity = float(window.iloc[0])
|
||||
end_equity = float(window.iloc[-1])
|
||||
if start_equity <= 0:
|
||||
return None
|
||||
return end_equity / start_equity - 1.0
|
||||
|
||||
|
||||
def _rolling_returns(curve: pd.Series, *, window_days: int, step_days: int) -> list[float]:
    """Collect window_days-long returns sampled every step_days across *curve*.

    Windows that cannot produce a return (see _window_return) are skipped.
    """
    if curve.empty:
        return []
    first_ts = curve.index[0]
    last_ts = curve.index[-1]
    window = pd.Timedelta(days=window_days)
    step = pd.Timedelta(days=step_days)
    collected: list[float] = []
    cursor = first_ts + window
    while cursor <= last_ts:
        value = _window_return(curve, cursor - window, cursor)
        if value is not None:
            collected.append(value)
        cursor = cursor + step
    return collected
|
||||
|
||||
|
||||
def _summarize_rolling(curve: pd.Series) -> dict[str, float]:
    """Summarize rolling 12-month and 24-month returns of an equity curve.

    For each horizon: sample count, fraction of positive windows, median
    return, and worst return (all 0.0 when no windows are available).
    """

    def _stats(prefix: str, rets: list[float]) -> dict[str, float]:
        # One stat bundle per horizon; keys match the original layout.
        if not rets:
            return {
                f"{prefix}_count": 0.0,
                f"{prefix}_positive_ratio": 0.0,
                f"{prefix}_median": 0.0,
                f"{prefix}_worst": 0.0,
            }
        return {
            f"{prefix}_count": float(len(rets)),
            f"{prefix}_positive_ratio": float(sum(ret > 0 for ret in rets) / len(rets)),
            f"{prefix}_median": float(pd.Series(rets).median()),
            f"{prefix}_worst": float(min(rets)),
        }

    summary = _stats("rolling_12m", _rolling_returns(curve, window_days=365, step_days=30))
    summary.update(_stats("rolling_24m", _rolling_returns(curve, window_days=730, step_days=30)))
    return summary
|
||||
|
||||
|
||||
def _score_variant(full_metrics: dict[str, float], rolling_metrics: dict[str, float]) -> float:
|
||||
annualized_return = float(full_metrics["annualized_return"])
|
||||
max_dd = abs(float(full_metrics["max_drawdown"]))
|
||||
sharpe = float(full_metrics["sharpe"])
|
||||
calmar = annualized_return / max(max_dd, 0.01)
|
||||
|
||||
score = 0.0
|
||||
score += 2.0 * calmar
|
||||
score += 0.6 * sharpe
|
||||
score += 1.6 * rolling_metrics["rolling_12m_positive_ratio"]
|
||||
score += 1.0 * rolling_metrics["rolling_24m_positive_ratio"]
|
||||
score += 3.0 * rolling_metrics["rolling_12m_median"]
|
||||
score += 2.2 * rolling_metrics["rolling_24m_median"]
|
||||
score += 1.8 * rolling_metrics["rolling_12m_worst"]
|
||||
score += 1.0 * rolling_metrics["rolling_24m_worst"]
|
||||
score += 0.25 * float(full_metrics["total_return"])
|
||||
|
||||
if rolling_metrics["rolling_12m_positive_ratio"] < 0.55:
|
||||
score -= 0.8
|
||||
if rolling_metrics["rolling_12m_worst"] < -0.18:
|
||||
score -= 1.2
|
||||
if float(full_metrics["max_drawdown"]) < -0.30:
|
||||
score -= 1.0
|
||||
if annualized_return < 0.08:
|
||||
score -= 0.6
|
||||
return score
|
||||
|
||||
|
||||
def _evaluate_variant(variant: ExtendedFilterVariant) -> dict[str, object]:
    """Backtest one filter variant over the trailing 5y window and score it.

    Runs inside a fork()-ed worker process and relies on GLOBAL_BUNDLE /
    GLOBAL_LATEST_BAR having been populated in the parent before the pool
    was created.
    """
    if GLOBAL_BUNDLE is None or GLOBAL_LATEST_BAR is None:
        raise RuntimeError("global bundle not initialized")

    # Start from the default v7 profile and overlay the variant's thresholds.
    cfg: Strategy32Config = build_strategy32_config(PROFILE_V7_DEFAULT)
    cfg.discovery_min_quote_volume_24h = variant.liquidity_floor
    cfg.universe_min_avg_dollar_volume = variant.avg_dollar_volume_floor
    cfg.momentum_min_score = variant.momentum_min_score
    cfg.momentum_min_relative_strength = variant.momentum_min_relative_strength
    cfg.momentum_min_7d_return = variant.momentum_min_7d_return
    cfg.max_pairwise_correlation = variant.max_pairwise_correlation
    cfg.carry_min_expected_edge = variant.carry_min_expected_edge

    bundle = GLOBAL_BUNDLE
    eval_end = GLOBAL_LATEST_BAR
    eval_start = eval_end - pd.Timedelta(days=1825)  # 5-year evaluation window
    # Include warmup history before eval_start so indicators are primed.
    raw_start = eval_start - pd.Timedelta(days=cfg.warmup_days)
    sliced = slice_bundle(bundle, raw_start, eval_end)

    backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
    backtester.engine_config.initial_capital = 1000.0
    result = backtester.run()

    full_metrics = evaluate_window_result(
        result,
        eval_start=eval_start,
        bars_per_day=backtester.engine_config.bars_per_day,
    )
    # Rolling-consistency stats are computed on the post-warmup curve only.
    curve = result.equity_curve.loc[result.equity_curve.index >= eval_start]
    rolling_metrics = _summarize_rolling(curve)
    score = _score_variant(full_metrics, rolling_metrics)
    return {
        "variant": asdict(variant),
        "score": score,
        "full_metrics": full_metrics,
        "rolling_metrics": rolling_metrics,
        "engine_pnl": result.engine_pnl,
        "total_trades": result.total_trades,
        "universe_size": len(bundle.prices),
    }
|
||||
|
||||
|
||||
def _evaluate_exact_windows(variant: ExtendedFilterVariant) -> dict[str, object]:
    """Re-run a variant independently for each exact window in FINAL_WINDOWS.

    Unlike _evaluate_variant (one 5y run), each window here gets its own
    warmup and backtest, so shorter-window metrics are not path-dependent
    on trades opened earlier in a longer run.
    """
    if GLOBAL_BUNDLE is None or GLOBAL_LATEST_BAR is None:
        raise RuntimeError("global bundle not initialized")

    # Same threshold overlay as _evaluate_variant.
    cfg: Strategy32Config = build_strategy32_config(PROFILE_V7_DEFAULT)
    cfg.discovery_min_quote_volume_24h = variant.liquidity_floor
    cfg.universe_min_avg_dollar_volume = variant.avg_dollar_volume_floor
    cfg.momentum_min_score = variant.momentum_min_score
    cfg.momentum_min_relative_strength = variant.momentum_min_relative_strength
    cfg.momentum_min_7d_return = variant.momentum_min_7d_return
    cfg.max_pairwise_correlation = variant.max_pairwise_correlation
    cfg.carry_min_expected_edge = variant.carry_min_expected_edge

    bundle = GLOBAL_BUNDLE
    results: dict[str, dict[str, float | int | str]] = {}
    for days, label in FINAL_WINDOWS:
        eval_end = GLOBAL_LATEST_BAR
        eval_start = eval_end - pd.Timedelta(days=days)
        raw_start = eval_start - pd.Timedelta(days=cfg.warmup_days)  # warmup lead-in
        sliced = slice_bundle(bundle, raw_start, eval_end)
        backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
        backtester.engine_config.initial_capital = 1000.0
        result = backtester.run()
        metrics = evaluate_window_result(
            result,
            eval_start=eval_start,
            bars_per_day=backtester.engine_config.bars_per_day,
        )
        metrics["engine_pnl"] = result.engine_pnl
        metrics["total_trades"] = result.total_trades
        results[label] = metrics

    return {
        "variant": asdict(variant),
        "results": results,
    }
|
||||
|
||||
|
||||
def _run_parallel(func, variants: list[ExtendedFilterVariant], workers: int = 6) -> list[dict[str, object]]:
    """Evaluate *variants* with *func* across a fork-based process pool.

    Prints one progress line per completed future and returns the rows
    sorted by descending "score"; rows without a score key (the
    exact-window rows) sort as 0.0.
    """
    ctx = mp.get_context("fork")  # fork so workers inherit the module globals
    rows: list[dict[str, object]] = []
    with ProcessPoolExecutor(max_workers=workers, mp_context=ctx) as executor:
        future_map = {executor.submit(func, variant): variant for variant in variants}
        total = len(future_map)
        done = 0
        for future in as_completed(future_map):
            row = future.result()
            rows.append(row)
            done += 1
            if "full_metrics" in row:
                # Row shape produced by _evaluate_variant (scored 5y run).
                print(
                    f"[{done:03d}/{total}] {row['variant']['name']} "
                    f"score={float(row['score']):.3f} "
                    f"5y={float(row['full_metrics']['total_return']) * 100:.2f}% "
                    f"mdd={float(row['full_metrics']['max_drawdown']) * 100:.2f}%",
                    flush=True,
                )
            else:
                # Row shape produced by _evaluate_exact_windows.
                metrics = row["results"]
                print(
                    f"[{done:02d}/{total}] exact {row['variant']['name']} "
                    f"1y={float(metrics['1y']['total_return']) * 100:.2f}% "
                    f"3y={float(metrics['3y']['total_return']) * 100:.2f}% "
                    f"5y={float(metrics['5y']['total_return']) * 100:.2f}%",
                    flush=True,
                )
    rows.sort(key=lambda row: float(row.get("score", 0.0)), reverse=True)
    return rows
|
||||
|
||||
|
||||
def _all_combos() -> list[ExtendedFilterVariant]:
    """Materialize the full cartesian grid of filter variants."""
    grid = itertools.product(
        LIQUIDITY_FLOORS,
        AVG_DOLLAR_VOLUME_MULTIPLIERS,
        MOMENTUM_MIN_SCORES,
        MOMENTUM_MIN_RS,
        MOMENTUM_MIN_7D,
        CORRELATION_CAPS,
        CARRY_MIN_EDGES,
    )
    combos: list[ExtendedFilterVariant] = []
    for floor, avg_mult, score, rs, ret7, corr, carry in grid:
        # Name encodes every axis so it doubles as a dedupe key.
        label = (
            f"liq{int(floor / 1_000_000)}m"
            f"_avg{avg_mult:.2f}"
            f"_s{score:.2f}"
            f"_rs{rs:.2f}"
            f"_r7{ret7:.2f}"
            f"_corr{corr:.2f}"
            f"_carry{carry:.3f}"
        )
        combos.append(
            ExtendedFilterVariant(
                name=label,
                liquidity_floor=floor,
                avg_dollar_volume_floor=floor * avg_mult,
                momentum_min_score=score,
                momentum_min_relative_strength=rs,
                momentum_min_7d_return=ret7,
                max_pairwise_correlation=corr,
                carry_min_expected_edge=carry,
            )
        )
    return combos
|
||||
|
||||
|
||||
def _seed_variants() -> list[ExtendedFilterVariant]:
    """Hand-picked variants from earlier searches, always kept in the sample."""
    seeds = (
        ("prev_balanced", 0.60, 0.70, 0.0),
        ("prev_profit", 0.65, 0.78, 0.0),
        ("prev_profit_carry", 0.65, 0.78, 0.002),
    )
    return [
        ExtendedFilterVariant(name, 50_000_000.0, 50_000_000.0, score, 0.00, 0.00, corr, carry)
        for name, score, corr, carry in seeds
    ]
|
||||
|
||||
|
||||
def _build_random_sample(seed: int) -> list[ExtendedFilterVariant]:
    """Seed variants plus a reproducible random draw from the full grid."""
    rng = random.Random(seed)
    combos = _all_combos()
    # Seeds come first; grid draws never overwrite them.
    unique: dict[str, ExtendedFilterVariant] = {v.name: v for v in _seed_variants()}
    seeded_names = set(unique)
    drawn = rng.sample(combos, k=min(RANDOM_SAMPLE_SIZE, len(combos)))
    for candidate in drawn:
        if candidate.name in seeded_names:
            continue
        unique[candidate.name] = candidate
    return list(unique.values())
|
||||
|
||||
|
||||
def _neighbor_values(value: float, values: list[float]) -> list[float]:
|
||||
idx = values.index(value)
|
||||
start = max(0, idx - 1)
|
||||
end = min(len(values), idx + 2)
|
||||
return values[start:end]
|
||||
|
||||
|
||||
def _build_local_variants(rows: list[dict[str, object]]) -> list[ExtendedFilterVariant]:
    """Neighborhood expansion around the best broad-search results.

    For each of the top TOP_BROAD_FOR_LOCAL rows, form the cartesian
    product of each parameter's immediate grid neighbors, dedupe by name,
    and cap the result at LOCAL_SAMPLE_SIZE via a seeded random sample.
    """
    seen: dict[str, ExtendedFilterVariant] = {}
    for row in rows[:TOP_BROAD_FOR_LOCAL]:
        base = row["variant"]
        floor_values = _neighbor_values(float(base["liquidity_floor"]), LIQUIDITY_FLOORS)
        # NOTE(review): the multiplier is reconstructed by division, so float
        # round-off may leave it slightly off the exact grid value.
        avg_mult_values = _neighbor_values(float(base["avg_dollar_volume_floor"]) / float(base["liquidity_floor"]), AVG_DOLLAR_VOLUME_MULTIPLIERS)
        score_values = _neighbor_values(float(base["momentum_min_score"]), MOMENTUM_MIN_SCORES)
        rs_values = _neighbor_values(float(base["momentum_min_relative_strength"]), MOMENTUM_MIN_RS)
        ret7_values = _neighbor_values(float(base["momentum_min_7d_return"]), MOMENTUM_MIN_7D)
        corr_values = _neighbor_values(float(base["max_pairwise_correlation"]), CORRELATION_CAPS)
        carry_values = _neighbor_values(float(base["carry_min_expected_edge"]), CARRY_MIN_EDGES)

        for floor, avg_mult, score, rs, ret7, corr, carry in itertools.product(
            floor_values,
            avg_mult_values,
            score_values,
            rs_values,
            ret7_values,
            corr_values,
            carry_values,
        ):
            # Same naming scheme as _all_combos so dedupe works across stages.
            name = (
                f"liq{int(floor/1_000_000)}m"
                f"_avg{avg_mult:.2f}"
                f"_s{score:.2f}"
                f"_rs{rs:.2f}"
                f"_r7{ret7:.2f}"
                f"_corr{corr:.2f}"
                f"_carry{carry:.3f}"
            )
            seen[name] = ExtendedFilterVariant(
                name=name,
                liquidity_floor=floor,
                avg_dollar_volume_floor=floor * avg_mult,
                momentum_min_score=score,
                momentum_min_relative_strength=rs,
                momentum_min_7d_return=ret7,
                max_pairwise_correlation=corr,
                carry_min_expected_edge=carry,
            )
    variants = list(seen.values())
    if len(variants) <= LOCAL_SAMPLE_SIZE:
        return variants
    # Distinct seed from the broad stage so the two samples are independent.
    rng = random.Random(RANDOM_SEED + 1)
    return rng.sample(variants, k=LOCAL_SAMPLE_SIZE)
|
||||
|
||||
|
||||
def main() -> None:
    """Filter search: broad random stage -> local refinement -> exact validation.

    Builds one shared market bundle at the loosest liquidity floor (so every
    variant can filter it down), evaluates variants in parallel, and writes
    the full ranked payload to /tmp as JSON.
    """
    global GLOBAL_BUNDLE, GLOBAL_LATEST_BAR

    base = build_strategy32_config(PROFILE_V7_DEFAULT)
    end = pd.Timestamp("2026-03-15 00:00:00", tz="UTC")
    max_days = max(days for days, _ in FINAL_WINDOWS)
    # Discover at the loosest floor; individual variants tighten it later.
    lowest_floor = min(LIQUIDITY_FLOORS)
    # 14-day buffer on top of window + warmup to be safe at the edges.
    start = end - pd.Timedelta(days=max_days + base.warmup_days + 14)

    print(f"building unbiased discovery bundle with current-volume filter disabled")

    GLOBAL_BUNDLE, GLOBAL_LATEST_BAR, accepted, rejected, quote_by_symbol = build_strategy32_market_bundle(
        symbols=base.symbols,
        auto_discover_symbols=True,
        quote_assets=base.quote_assets,
        excluded_base_assets=base.excluded_base_assets,
        min_quote_volume_24h=lowest_floor,
        start=start,
        end=end,
        timeframe=base.timeframe,
        max_staleness_days=base.max_symbol_staleness_days,
    )

    # Stage 1: broad random sample over the full grid.
    broad_variants = _build_random_sample(RANDOM_SEED)
    print(f"running broad search on {len(broad_variants)} variants")
    broad_rows = _run_parallel(_evaluate_variant, broad_variants)

    # Stage 2: local neighborhood refinement around the broad winners,
    # skipping anything already evaluated in stage 1.
    local_variants = _build_local_variants(broad_rows)
    already = {row["variant"]["name"] for row in broad_rows}
    local_variants = [variant for variant in local_variants if variant.name not in already]
    print(f"running local refinement on {len(local_variants)} variants")
    local_rows = _run_parallel(_evaluate_variant, local_variants)

    # Merge both stages by name and rank by score.
    merged: dict[str, dict[str, object]] = {}
    for row in broad_rows + local_rows:
        merged[row["variant"]["name"]] = row
    ranked = sorted(merged.values(), key=lambda row: float(row["score"]), reverse=True)

    # Stage 3: exact independent 1y/3y/5y validation for the top variants.
    exact_variants = [ExtendedFilterVariant(**row["variant"]) for row in ranked[:TOP_FINAL_FOR_EXACT]]
    print(f"running exact 1y/3y/5y validation on top {len(exact_variants)} variants")
    exact_rows = _run_parallel(_evaluate_exact_windows, exact_variants)
    exact_by_name = {row["variant"]["name"]: row for row in exact_rows}

    # Re-attach exact-window metrics to the ranked rows (preserving rank order).
    final_ranked: list[dict[str, object]] = []
    for row in ranked[:TOP_FINAL_FOR_EXACT]:
        name = row["variant"]["name"]
        final_ranked.append(
            {
                **row,
                "exact_windows": exact_by_name[name]["results"],
            }
        )

    payload = {
        "strategy": "strategy32",
        "analysis": "wide_universe_filter_search_extended",
        "profile": PROFILE_V7_DEFAULT,
        "initial_capital": 1000.0,
        "latest_completed_bar": str(GLOBAL_LATEST_BAR),
        "accepted_symbols": accepted,
        "rejected_symbols": rejected,
        "quote_by_symbol": quote_by_symbol,
        "broad_variants": len(broad_rows),
        "local_variants": len(local_rows),
        "ranked_top20": ranked[:20],
        "final_ranked": final_ranked,
    }
    out = Path("/tmp/strategy32_filter_search_extended.json")
    out.write_text(json.dumps(payload, indent=2), encoding="utf-8")

    # Console summary of the validated winners.
    print("\nTop exact variants")
    for row in final_ranked:
        metrics = row["exact_windows"]
        print(
            row["variant"]["name"],
            "score",
            round(float(row["score"]), 3),
            "1y",
            round(float(metrics["1y"]["total_return"]) * 100, 2),
            "3y",
            round(float(metrics["3y"]["total_return"]) * 100, 2),
            "5y",
            round(float(metrics["5y"]["total_return"]) * 100, 2),
            "mdd5y",
            round(float(metrics["5y"]["max_drawdown"]) * 100, 2),
        )
    print("\nwrote", out)
|
||||
|
||||
|
||||
# Script entry point.
if __name__ == "__main__":
    main()
|
||||
30
scripts/run_hybrid_regime_backtest.py
Normal file
30
scripts/run_hybrid_regime_backtest.py
Normal file
@@ -0,0 +1,30 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy32.research.hybrid_regime import run_hybrid_backtest
|
||||
|
||||
|
||||
def main() -> None:
    """Run the hybrid regime backtest, dump the payload to /tmp as JSON,
    and print a one-line metric summary per result label."""
    payload = run_hybrid_backtest()
    out = Path("/tmp/strategy32_hybrid_regime_backtest.json")
    out.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    for label, metrics in payload["results"].items():
        print(
            label,
            f"ret={float(metrics['total_return']) * 100:.2f}%",
            f"ann={float(metrics['annualized_return']) * 100:.2f}%",
            f"sharpe={float(metrics['sharpe']):.2f}",
            f"mdd={float(metrics['max_drawdown']) * 100:.2f}%",
        )
    print(f"wrote {out}")
|
||||
|
||||
|
||||
# Script entry point.
if __name__ == "__main__":
    main()
|
||||
458
scripts/run_hybrid_strategy_search.py
Normal file
458
scripts/run_hybrid_strategy_search.py
Normal file
@@ -0,0 +1,458 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import itertools
|
||||
import json
|
||||
import multiprocessing as mp
|
||||
import sys
|
||||
from concurrent.futures import ProcessPoolExecutor, as_completed
|
||||
from dataclasses import asdict, dataclass
|
||||
from pathlib import Path
|
||||
from statistics import median
|
||||
|
||||
import pandas as pd
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy29.backtest.metrics import max_drawdown, sharpe_ratio
|
||||
from strategy29.backtest.window_analysis import slice_bundle
|
||||
from strategy32.research.adverse_regime import AdverseRegimeResearchHarness
|
||||
from strategy32.research.hybrid_regime import (
|
||||
_build_positive_filter_plan,
|
||||
_curve_returns,
|
||||
_run_adverse_component_curve,
|
||||
_run_static_component_curve,
|
||||
load_fixed66_bundle,
|
||||
)
|
||||
from strategy32.scripts.run_regime_filter_analysis import STRATEGIC_REGIME_PROFILES, build_strategic_regime_frame
|
||||
|
||||
|
||||
# Output path for the search payload.
OUT_JSON = Path("/tmp/strategy32_hybrid_strategy_search.json")
# Trailing evaluation windows: (days, label).
WINDOWS = (
    (365, "1y"),
    (730, "2y"),
    (1095, "3y"),
    (1460, "4y"),
    (1825, "5y"),
)
# Calendar-year segments (end bound exclusive).  2021 starts mid-March —
# presumably the first available bar; TODO confirm against the bundle.
YEAR_PERIODS = (
    ("2021", pd.Timestamp("2021-03-16 04:00:00+00:00"), pd.Timestamp("2022-01-01 00:00:00+00:00")),
    ("2022", pd.Timestamp("2022-01-01 00:00:00+00:00"), pd.Timestamp("2023-01-01 00:00:00+00:00")),
    ("2023", pd.Timestamp("2023-01-01 00:00:00+00:00"), pd.Timestamp("2024-01-01 00:00:00+00:00")),
    ("2024", pd.Timestamp("2024-01-01 00:00:00+00:00"), pd.Timestamp("2025-01-01 00:00:00+00:00")),
    ("2025", pd.Timestamp("2025-01-01 00:00:00+00:00"), pd.Timestamp("2026-01-01 00:00:00+00:00")),
)

# Options for the two positive regimes: which filter profile the static
# component runs with, and whether the guarded switching plan is applied.
EXPANSION_MODES = {
    "prev_static": {"filter_name": "prev_balanced", "guarded": False},
    "guarded_static": {"filter_name": "guarded_positive", "guarded": False},
    "guarded_switch": {"filter_name": "prev_balanced", "guarded": True},
    "overheat_static": {"filter_name": "overheat_tolerant", "guarded": False},
}
EUPHORIA_MODES = {
    "overheat_static": {"filter_name": "overheat_tolerant", "guarded": False},
    "guarded_static": {"filter_name": "guarded_euphoria", "guarded": False},
    "guarded_switch": {"filter_name": "overheat_tolerant", "guarded": True},
    "prev_static": {"filter_name": "prev_balanced", "guarded": False},
}
# Engine choices for the three adverse regimes.
CAP_ENGINES = ("cap_cash", "cap_btc_rebound")
CHOP_ENGINES = ("chop_cash", "chop_inverse_carry", "chop_inverse_carry_strict")
DIST_ENGINES = ("dist_cash", "dist_inverse_carry_strict")
|
||||
|
||||
|
||||
@dataclass(frozen=True, slots=True)
class HybridCandidate:
    """One hybrid configuration: a regime profile plus per-regime engine picks."""

    # Key into STRATEGIC_REGIME_PROFILES used to classify each bar.
    regime_profile: str
    # Keys into EXPANSION_MODES / EUPHORIA_MODES for the positive regimes.
    expansion_mode: str
    euphoria_mode: str
    # Adverse-regime engine names (see CAP_ENGINES / CHOP_ENGINES / DIST_ENGINES).
    cap_engine: str
    chop_engine: str
    dist_engine: str

    @property
    def name(self) -> str:
        """Stable human-readable identifier used for reporting and dedupe."""
        return (
            f"{self.regime_profile}"
            f"|exp:{self.expansion_mode}"
            f"|eup:{self.euphoria_mode}"
            f"|cap:{self.cap_engine}"
            f"|chop:{self.chop_engine}"
            f"|dist:{self.dist_engine}"
        )
|
||||
|
||||
|
||||
def _annualized_return(total_return: float, days: int) -> float:
|
||||
if days <= 0:
|
||||
return 0.0
|
||||
return (1.0 + total_return) ** (365.0 / days) - 1.0
|
||||
|
||||
|
||||
def _segment_curve(curve: pd.Series, start: pd.Timestamp, end: pd.Timestamp) -> pd.Series:
|
||||
segment = curve.loc[(curve.index >= start) & (curve.index <= end)].copy()
|
||||
if segment.empty:
|
||||
return segment
|
||||
base = float(segment.iloc[0])
|
||||
if base <= 0:
|
||||
return pd.Series(dtype=float)
|
||||
return segment / base * 1000.0
|
||||
|
||||
|
||||
def _segment_metrics(curve: pd.Series, start: pd.Timestamp, end: pd.Timestamp) -> dict[str, float]:
    """Window metrics (total/annualized return, sharpe, max drawdown) for [start, end]."""
    segment = _segment_curve(curve, start, end)
    bounds = {"start": str(start), "end": str(end)}
    if len(segment) < 2:
        # Too few points to measure anything; report zeroed metrics.
        return {
            **bounds,
            "total_return": 0.0,
            "annualized_return": 0.0,
            "sharpe": 0.0,
            "max_drawdown": 0.0,
        }
    total = float(segment.iloc[-1] / segment.iloc[0] - 1.0)
    span_days = max(int((end - start) / pd.Timedelta(days=1)), 1)
    return {
        **bounds,
        "total_return": total,
        "annualized_return": _annualized_return(total, span_days),
        # 6 is presumably bars-per-day for the 4h timeframe — TODO confirm
        # against sharpe_ratio's signature.
        "sharpe": sharpe_ratio(segment, 6),
        "max_drawdown": max_drawdown(segment),
    }
|
||||
|
||||
|
||||
def _score_candidate(window_results: dict[str, dict[str, float]], year_results: dict[str, dict[str, float]]) -> tuple[float, int, int]:
|
||||
year_returns = [float(metrics["total_return"]) for metrics in year_results.values()]
|
||||
negative_years = sum(ret < 0 for ret in year_returns)
|
||||
mdd_violations = sum(float(metrics["max_drawdown"]) < -0.20 for metrics in window_results.values())
|
||||
|
||||
score = 0.0
|
||||
score += 4.0 * float(window_results["5y"]["annualized_return"])
|
||||
score += 2.2 * float(window_results["1y"]["annualized_return"])
|
||||
score += 1.5 * float(window_results["2y"]["annualized_return"])
|
||||
score += 1.2 * float(window_results["4y"]["annualized_return"])
|
||||
score += 0.8 * float(window_results["3y"]["annualized_return"])
|
||||
score += 1.5 * float(window_results["5y"]["sharpe"])
|
||||
score += 0.8 * float(window_results["1y"]["sharpe"])
|
||||
score += 2.0 * min(year_returns)
|
||||
score += 1.0 * median(year_returns)
|
||||
score += 0.75 * sum(max(ret, 0.0) for ret in year_returns)
|
||||
score -= 3.0 * negative_years
|
||||
score -= 0.75 * mdd_violations
|
||||
for label in ("1y", "2y", "3y", "4y", "5y"):
|
||||
dd = abs(float(window_results[label]["max_drawdown"]))
|
||||
score -= max(0.0, dd - 0.20) * 4.0
|
||||
return score, negative_years, mdd_violations
|
||||
|
||||
|
||||
def _compose_full_curve(
    *,
    latest_bar: pd.Timestamp,
    timestamps: list[pd.Timestamp],
    regime_map: dict[pd.Timestamp, str],
    component_returns: dict[str, pd.Series],
    candidate: HybridCandidate,
) -> pd.Series:
    """Stitch per-regime component return series into one equity curve.

    The regime observed at bar i-1 selects which component's return is
    applied over bar i (signal on the prior bar, execute on the next).
    Unknown regimes or missing timestamps contribute a 0.0 return.
    NOTE(review): *latest_bar* is unused here — presumably kept for
    signature parity with callers.
    """
    equity = 1000.0
    idx = [timestamps[0]]
    vals = [equity]
    for i in range(1, len(timestamps)):
        signal_ts = timestamps[i - 1]
        execution_ts = timestamps[i]
        regime = regime_map.get(signal_ts, "")
        # Map the regime to the candidate's chosen component key.
        if regime == "MOMENTUM_EXPANSION":
            key = f"MOMENTUM_EXPANSION::{candidate.expansion_mode}"
        elif regime == "EUPHORIC_BREAKOUT":
            key = f"EUPHORIC_BREAKOUT::{candidate.euphoria_mode}"
        elif regime == "CAPITULATION_STRESS":
            key = candidate.cap_engine
        elif regime == "CHOPPY_ROTATION":
            key = candidate.chop_engine
        elif regime == "DISTRIBUTION_DRIFT":
            key = candidate.dist_engine
        else:
            key = ""
        ret = float(component_returns.get(key, pd.Series(dtype=float)).get(execution_ts, 0.0))
        equity *= max(0.0, 1.0 + ret)  # floor at zero: equity cannot go negative
        idx.append(execution_ts)
        vals.append(equity)
    return pd.Series(vals, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
|
||||
|
||||
|
||||
def _exact_validate_candidate(
    *,
    bundle,
    latest_bar: pd.Timestamp,
    candidate: HybridCandidate,
) -> dict[str, object]:
    """Independently re-run *candidate* over each window and calendar year.

    Unlike the cached fast path, every period gets its own slice, regime
    frame, and component curves, so reported metrics are free of
    cross-period path dependence.  Returns the scored payload row.
    """

    def run_period(eval_start: pd.Timestamp, eval_end: pd.Timestamp) -> pd.Series:
        """Build the composed equity curve for one evaluation period."""
        # 90-day lead-in so regime/indicator state is warmed up.
        raw_start = eval_start - pd.Timedelta(days=90)
        sliced = slice_bundle(bundle, raw_start, eval_end)
        regime_frame = build_strategic_regime_frame(sliced, eval_start, eval_end, profile=candidate.regime_profile)
        regime_map = dict(zip(pd.to_datetime(regime_frame["timestamp"]), regime_frame["strategic_regime"]))
        harness = AdverseRegimeResearchHarness(sliced, eval_end)
        component_returns: dict[str, pd.Series] = {}

        # Positive-regime components: one curve per expansion mode.
        for mode_name, spec in EXPANSION_MODES.items():
            filter_plan = _build_positive_filter_plan(regime_frame, "MOMENTUM_EXPANSION") if spec["guarded"] else None
            curve = _run_static_component_curve(
                sliced=sliced,
                latest_bar=eval_end,
                eval_start=eval_start,
                regime_map=regime_map,
                active_regime="MOMENTUM_EXPANSION",
                filter_name=str(spec["filter_name"]),
                filter_plan=filter_plan,
            )
            component_returns[f"MOMENTUM_EXPANSION::{mode_name}"] = _curve_returns(curve)

        # Same for the euphoric-breakout modes.
        for mode_name, spec in EUPHORIA_MODES.items():
            filter_plan = _build_positive_filter_plan(regime_frame, "EUPHORIC_BREAKOUT") if spec["guarded"] else None
            curve = _run_static_component_curve(
                sliced=sliced,
                latest_bar=eval_end,
                eval_start=eval_start,
                regime_map=regime_map,
                active_regime="EUPHORIC_BREAKOUT",
                filter_name=str(spec["filter_name"]),
                filter_plan=filter_plan,
            )
            component_returns[f"EUPHORIC_BREAKOUT::{mode_name}"] = _curve_returns(curve)

        # Adverse-regime components: only the engines this candidate uses.
        for engine_name in {candidate.cap_engine, candidate.chop_engine, candidate.dist_engine}:
            curve = _run_adverse_component_curve(
                eval_start=eval_start,
                engine_name=engine_name,
                harness=harness,
                regime_frame=regime_frame,
            )
            component_returns[engine_name] = _curve_returns(curve)

        # BTC's bar timestamps define the composite curve's time axis.
        timestamps = sorted(sliced.prices["BTC"]["timestamp"].loc[sliced.prices["BTC"]["timestamp"] >= eval_start].tolist())
        return _compose_full_curve(
            latest_bar=eval_end,
            timestamps=timestamps,
            regime_map=regime_map,
            component_returns=component_returns,
            candidate=candidate,
        )

    # Trailing windows (1y..5y) ending at the latest completed bar.
    window_results: dict[str, dict[str, float]] = {}
    for days, label in WINDOWS:
        eval_end = latest_bar
        eval_start = eval_end - pd.Timedelta(days=days)
        curve = run_period(eval_start, eval_end)
        window_results[label] = _segment_metrics(curve, eval_start, eval_end)

    # Calendar years (end bound exclusive) plus the current year-to-date.
    year_results: dict[str, dict[str, float]] = {}
    for label, start, end_exclusive in YEAR_PERIODS:
        eval_end = min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))
        curve = run_period(start, eval_end)
        year_results[label] = _segment_metrics(curve, start, eval_end)
    ytd_start = pd.Timestamp("2026-01-01 00:00:00+00:00")
    year_results["2026_YTD"] = _segment_metrics(run_period(ytd_start, latest_bar), ytd_start, latest_bar)

    # YTD is reported but excluded from scoring (partial year).
    score, negative_years, mdd_violations = _score_candidate(window_results, {k: v for k, v in year_results.items() if k != "2026_YTD"})
    return {
        "candidate": asdict(candidate),
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "windows": window_results,
        "years": year_results,
        "validation": "exact_independent_periods",
    }
|
||||
|
||||
|
||||
def _build_profile_cache(profile_name: str) -> tuple[str, dict[str, object]]:
    """Precompute the regime map and all component return series for one profile.

    Runs once per regime profile (in a worker process) so that candidates
    can be scored by recombining cached component returns instead of
    re-running the underlying engines per candidate.
    """
    bundle, latest_bar = load_fixed66_bundle("/tmp/strategy32_fixed66_bundle.pkl")
    eval_start = latest_bar - pd.Timedelta(days=1825)  # 5-year evaluation span
    raw_start = eval_start - pd.Timedelta(days=90)  # warmup lead-in
    sliced = slice_bundle(bundle, raw_start, latest_bar)
    regime_frame = build_strategic_regime_frame(sliced, eval_start, latest_bar, profile=profile_name)
    regime_map = dict(zip(pd.to_datetime(regime_frame["timestamp"]), regime_frame["strategic_regime"]))
    harness = AdverseRegimeResearchHarness(sliced, latest_bar)
    component_returns: dict[str, pd.Series] = {}

    # Cache every positive-regime mode (not just one candidate's choice).
    for mode_name, spec in EXPANSION_MODES.items():
        filter_plan = _build_positive_filter_plan(regime_frame, "MOMENTUM_EXPANSION") if spec["guarded"] else None
        curve = _run_static_component_curve(
            sliced=sliced,
            latest_bar=latest_bar,
            eval_start=eval_start,
            regime_map=regime_map,
            active_regime="MOMENTUM_EXPANSION",
            filter_name=str(spec["filter_name"]),
            filter_plan=filter_plan,
        )
        component_returns[f"MOMENTUM_EXPANSION::{mode_name}"] = _curve_returns(curve)

    for mode_name, spec in EUPHORIA_MODES.items():
        filter_plan = _build_positive_filter_plan(regime_frame, "EUPHORIC_BREAKOUT") if spec["guarded"] else None
        curve = _run_static_component_curve(
            sliced=sliced,
            latest_bar=latest_bar,
            eval_start=eval_start,
            regime_map=regime_map,
            active_regime="EUPHORIC_BREAKOUT",
            filter_name=str(spec["filter_name"]),
            filter_plan=filter_plan,
        )
        component_returns[f"EUPHORIC_BREAKOUT::{mode_name}"] = _curve_returns(curve)

    # Cache every adverse engine regardless of candidate choices.
    for engine_name in sorted(set(CAP_ENGINES) | set(CHOP_ENGINES) | set(DIST_ENGINES)):
        curve = _run_adverse_component_curve(
            eval_start=eval_start,
            engine_name=engine_name,
            harness=harness,
            regime_frame=regime_frame,
        )
        component_returns[engine_name] = _curve_returns(curve)

    # BTC bars define the shared time axis for curve composition.
    timestamps = sorted(sliced.prices["BTC"]["timestamp"].loc[sliced.prices["BTC"]["timestamp"] >= eval_start].tolist())
    return profile_name, {
        "regime_map": regime_map,
        "component_returns": component_returns,
        "timestamps": timestamps,
    }
|
||||
|
||||
|
||||
def _exact_validate_candidate_worker(candidate_payload: dict[str, str]) -> dict[str, object]:
    """Process-pool entry point: reload the cached bundle and exactly validate one candidate."""
    candidate = HybridCandidate(**candidate_payload)
    loaded_bundle, last_bar = load_fixed66_bundle("/tmp/strategy32_fixed66_bundle.pkl")
    return _exact_validate_candidate(bundle=loaded_bundle, latest_bar=last_bar, candidate=candidate)
|
||||
|
||||
|
||||
def main() -> None:
    """Full hybrid-strategy grid search.

    Pipeline: build per-profile component caches in parallel, score every
    candidate combination approximately from the cached component curves,
    then exactly re-validate the top 3 candidates in worker processes.
    Writes the full result payload to OUT_JSON and prints leaderboards.
    """
    bundle, latest_bar = load_fixed66_bundle("/tmp/strategy32_fixed66_bundle.pkl")
    profile_caches: dict[str, dict[str, object]] = {}
    # "fork" start method lets workers inherit loaded module state (POSIX only).
    ctx = mp.get_context("fork")
    with ProcessPoolExecutor(max_workers=min(3, len(STRATEGIC_REGIME_PROFILES)), mp_context=ctx) as executor:
        future_map = {executor.submit(_build_profile_cache, profile_name): profile_name for profile_name in STRATEGIC_REGIME_PROFILES}
        for future in as_completed(future_map):
            profile_name, cache = future.result()
            profile_caches[profile_name] = cache
            print(f"[cache] built {profile_name}", flush=True)

    # One candidate per combination of every search dimension.
    candidates = [
        HybridCandidate(*combo)
        for combo in itertools.product(
            STRATEGIC_REGIME_PROFILES.keys(),
            EXPANSION_MODES.keys(),
            EUPHORIA_MODES.keys(),
            CAP_ENGINES,
            CHOP_ENGINES,
            DIST_ENGINES,
        )
    ]

    rows: list[dict[str, object]] = []
    for idx, candidate in enumerate(candidates, start=1):
        cache = profile_caches[candidate.regime_profile]
        # Approximate evaluation: compose the equity curve from cached
        # per-component return series instead of re-simulating.
        full_curve = _compose_full_curve(
            latest_bar=latest_bar,
            timestamps=cache["timestamps"],
            regime_map=cache["regime_map"],
            component_returns=cache["component_returns"],
            candidate=candidate,
        )
        window_results = {
            label: _segment_metrics(full_curve, latest_bar - pd.Timedelta(days=days), latest_bar)
            for days, label in WINDOWS
        }
        year_results = {
            # end_exclusive minus one second keeps the segment inside the year
            label: _segment_metrics(full_curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
            for label, start, end_exclusive in YEAR_PERIODS
        }
        year_results["2026_YTD"] = _segment_metrics(full_curve, pd.Timestamp("2026-01-01 00:00:00+00:00"), latest_bar)
        # The partial 2026 year is reported but excluded from scoring.
        score, negative_years, mdd_violations = _score_candidate(window_results, {k: v for k, v in year_results.items() if k != "2026_YTD"})
        row = {
            "candidate": asdict(candidate),
            "name": candidate.name,
            "score": score,
            "negative_years": negative_years,
            "mdd_violations": mdd_violations,
            "windows": window_results,
            "years": year_results,
            "validation": "approx_full_curve_slice",
        }
        rows.append(row)
        print(
            f"[{idx:03d}/{len(candidates)}] {candidate.name} "
            f"score={score:.3f} neg_years={negative_years} mdd_viol={mdd_violations} "
            f"1y={window_results['1y']['total_return']*100:.2f}% "
            f"5y_ann={window_results['5y']['annualized_return']*100:.2f}%",
            flush=True,
        )

    # Rank: fewest losing years, then fewest drawdown violations, then score.
    rows.sort(key=lambda row: (int(row["negative_years"]), int(row["mdd_violations"]), -float(row["score"])))
    with ProcessPoolExecutor(max_workers=min(3, len(rows[:3])), mp_context=ctx) as executor:
        future_map = {
            executor.submit(_exact_validate_candidate_worker, row["candidate"]): row["name"]
            for row in rows[:3]
        }
        exact_top = []
        for future in as_completed(future_map):
            result = future.result()
            exact_top.append(result)
            print(f"[exact] validated {future_map[future]}", flush=True)
    # as_completed yields in finish order; restore the ranking order.
    exact_top.sort(key=lambda row: (int(row["negative_years"]), int(row["mdd_violations"]), -float(row["score"])))

    payload = {
        "analysis": "strategy32_hybrid_strategy_search",
        "latest_completed_bar": str(latest_bar),
        "candidate_count": len(candidates),
        "regime_profiles": list(STRATEGIC_REGIME_PROFILES.keys()),
        "expansion_modes": list(EXPANSION_MODES.keys()),
        "euphoria_modes": list(EUPHORIA_MODES.keys()),
        "cap_engines": list(CAP_ENGINES),
        "chop_engines": list(CHOP_ENGINES),
        "dist_engines": list(DIST_ENGINES),
        "summary": rows[:20],
        "exact_top": exact_top,
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")

    print("\nTop 5 approximate candidates", flush=True)
    for row in rows[:5]:
        print(
            row["name"],
            "score",
            round(float(row["score"]), 3),
            "neg_years",
            row["negative_years"],
            "mdd_viol",
            row["mdd_violations"],
            "2025",
            round(float(row["years"]["2025"]["total_return"]) * 100, 2),
            "2024",
            round(float(row["years"]["2024"]["total_return"]) * 100, 2),
            "1y",
            round(float(row["windows"]["1y"]["total_return"]) * 100, 2),
            "5y_ann",
            round(float(row["windows"]["5y"]["annualized_return"]) * 100, 2),
        )

    print("\nExact top candidates", flush=True)
    for row in exact_top:
        print(
            HybridCandidate(**row["candidate"]).name,
            "score",
            round(float(row["score"]), 3),
            "neg_years",
            row["negative_years"],
            "mdd_viol",
            row["mdd_violations"],
            "2025",
            round(float(row["years"]["2025"]["total_return"]) * 100, 2),
            "2024",
            round(float(row["years"]["2024"]["total_return"]) * 100, 2),
            "1y",
            round(float(row["windows"]["1y"]["total_return"]) * 100, 2),
            "5y_ann",
            round(float(row["windows"]["5y"]["annualized_return"]) * 100, 2),
        )
    print("\nwrote", OUT_JSON, flush=True)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
281
scripts/run_hybrid_strategy_search_fast.py
Normal file
281
scripts/run_hybrid_strategy_search_fast.py
Normal file
@@ -0,0 +1,281 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import multiprocessing as mp
|
||||
import sys
|
||||
from concurrent.futures import ProcessPoolExecutor, as_completed
|
||||
from dataclasses import asdict
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy29.backtest.window_analysis import slice_bundle
|
||||
from strategy32.research.adverse_regime import AdverseRegimeResearchHarness
|
||||
from strategy32.research.hybrid_regime import (
|
||||
_build_positive_filter_plan,
|
||||
_curve_returns,
|
||||
_run_adverse_component_curve,
|
||||
_run_static_component_curve,
|
||||
load_fixed66_bundle,
|
||||
)
|
||||
from strategy32.scripts.run_hybrid_strategy_search import (
|
||||
CAP_ENGINES,
|
||||
CHOP_ENGINES,
|
||||
DIST_ENGINES,
|
||||
EUPHORIA_MODES,
|
||||
EXPANSION_MODES,
|
||||
OUT_JSON,
|
||||
WINDOWS,
|
||||
YEAR_PERIODS,
|
||||
HybridCandidate,
|
||||
_build_profile_cache,
|
||||
_compose_full_curve,
|
||||
_score_candidate,
|
||||
_segment_metrics,
|
||||
)
|
||||
from strategy32.scripts.run_regime_filter_analysis import STRATEGIC_REGIME_PROFILES, build_strategic_regime_frame
|
||||
|
||||
|
||||
# Destination for the fast search results (OUT_JSON only receives a pointer file).
FAST_OUT_JSON = Path("/tmp/strategy32_hybrid_strategy_search_fast.json")
# Start of the current (partial) calendar year; that window is excluded from scoring.
YTD_START = pd.Timestamp("2026-01-01 00:00:00+00:00")
|
||||
|
||||
|
||||
def _build_candidate_rows(latest_bar: pd.Timestamp, profile_caches: dict[str, dict[str, object]]) -> list[dict[str, object]]:
    """Approximately score every candidate combination from cached components.

    Composes each candidate's full curve from the pre-built per-profile
    component caches, computes window/year metrics, and returns rows sorted
    best-first (fewest negative years, fewest MDD violations, highest score).
    """
    # Fix: the original reached for __import__("itertools") inline; a normal
    # function-scope import is equivalent and idiomatic.
    import itertools

    candidates = [
        HybridCandidate(*combo)
        for combo in itertools.product(
            STRATEGIC_REGIME_PROFILES.keys(),
            EXPANSION_MODES.keys(),
            EUPHORIA_MODES.keys(),
            CAP_ENGINES,
            CHOP_ENGINES,
            DIST_ENGINES,
        )
    ]
    rows: list[dict[str, object]] = []
    for idx, candidate in enumerate(candidates, start=1):
        cache = profile_caches[candidate.regime_profile]
        # Approximate: stitch cached per-component return series together.
        full_curve = _compose_full_curve(
            latest_bar=latest_bar,
            timestamps=cache["timestamps"],
            regime_map=cache["regime_map"],
            component_returns=cache["component_returns"],
            candidate=candidate,
        )
        window_results = {
            label: _segment_metrics(full_curve, latest_bar - pd.Timedelta(days=days), latest_bar)
            for days, label in WINDOWS
        }
        year_results = {
            label: _segment_metrics(full_curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
            for label, start, end_exclusive in YEAR_PERIODS
        }
        year_results["2026_YTD"] = _segment_metrics(full_curve, YTD_START, latest_bar)
        # The partial YTD year is reported but excluded from scoring.
        score, negative_years, mdd_violations = _score_candidate(
            window_results,
            {k: v for k, v in year_results.items() if k != "2026_YTD"},
        )
        row = {
            "candidate": asdict(candidate),
            "name": candidate.name,
            "score": score,
            "negative_years": negative_years,
            "mdd_violations": mdd_violations,
            "windows": window_results,
            "years": year_results,
            "validation": "approx_full_curve_slice",
        }
        rows.append(row)
        print(
            f"[approx {idx:03d}/{len(candidates)}] {candidate.name} "
            f"score={score:.3f} neg_years={negative_years} mdd_viol={mdd_violations} "
            f"1y={window_results['1y']['total_return'] * 100:.2f}% "
            f"5y_ann={window_results['5y']['annualized_return'] * 100:.2f}%",
            flush=True,
        )
    # Best-first: fewest losing years, then fewest MDD violations, then score.
    rows.sort(key=lambda row: (int(row["negative_years"]), int(row["mdd_violations"]), -float(row["score"])))
    return rows
|
||||
|
||||
|
||||
def _period_specs(latest_bar: pd.Timestamp) -> list[tuple[str, str, pd.Timestamp, pd.Timestamp]]:
    """Enumerate every (kind, label, start, end) period to validate exactly."""
    window_specs = [
        ("window", label, latest_bar - pd.Timedelta(days=days), latest_bar)
        for days, label in WINDOWS
    ]
    year_specs = [
        ("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
        for label, start, end_exclusive in YEAR_PERIODS
    ]
    ytd_spec = ("year", "2026_YTD", YTD_START, latest_bar)
    return window_specs + year_specs + [ytd_spec]
|
||||
|
||||
|
||||
def _exact_period_worker(candidate_payload: dict[str, str], period_spec: tuple[str, str, str, str]) -> tuple[str, str, dict[str, float]]:
    """Exactly re-simulate one candidate over one evaluation period.

    Runs in a worker process: reloads the cached bundle, rebuilds the regime
    frame and every needed component curve restricted to the period, composes
    the full curve, and returns (kind, label, metrics) for the period.
    """
    kind, label, start_text, end_text = period_spec
    eval_start = pd.Timestamp(start_text)
    eval_end = pd.Timestamp(end_text)
    bundle, _ = load_fixed66_bundle("/tmp/strategy32_fixed66_bundle.pkl")
    candidate = HybridCandidate(**candidate_payload)

    # 90-day warmup window before the evaluation start so indicators are primed.
    raw_start = eval_start - pd.Timedelta(days=90)
    sliced = slice_bundle(bundle, raw_start, eval_end)
    regime_frame = build_strategic_regime_frame(sliced, eval_start, eval_end, profile=candidate.regime_profile)
    regime_map = dict(zip(pd.to_datetime(regime_frame["timestamp"]), regime_frame["strategic_regime"]))
    harness = AdverseRegimeResearchHarness(sliced, eval_end)
    component_returns: dict[str, pd.Series] = {}

    # Positive-regime components: every expansion/euphoria mode is simulated
    # so _compose_full_curve can pick whichever the candidate selects.
    for mode_name, spec in EXPANSION_MODES.items():
        filter_plan = _build_positive_filter_plan(regime_frame, "MOMENTUM_EXPANSION") if spec["guarded"] else None
        curve = _run_static_component_curve(
            sliced=sliced,
            latest_bar=eval_end,
            eval_start=eval_start,
            regime_map=regime_map,
            active_regime="MOMENTUM_EXPANSION",
            filter_name=str(spec["filter_name"]),
            filter_plan=filter_plan,
        )
        component_returns[f"MOMENTUM_EXPANSION::{mode_name}"] = _curve_returns(curve)

    for mode_name, spec in EUPHORIA_MODES.items():
        filter_plan = _build_positive_filter_plan(regime_frame, "EUPHORIC_BREAKOUT") if spec["guarded"] else None
        curve = _run_static_component_curve(
            sliced=sliced,
            latest_bar=eval_end,
            eval_start=eval_start,
            regime_map=regime_map,
            active_regime="EUPHORIC_BREAKOUT",
            filter_name=str(spec["filter_name"]),
            filter_plan=filter_plan,
        )
        component_returns[f"EUPHORIC_BREAKOUT::{mode_name}"] = _curve_returns(curve)

    # Adverse engines: only the (deduplicated) engines this candidate uses.
    for engine_name in sorted({candidate.cap_engine, candidate.chop_engine, candidate.dist_engine}):
        curve = _run_adverse_component_curve(
            eval_start=eval_start,
            engine_name=engine_name,
            harness=harness,
            regime_frame=regime_frame,
        )
        component_returns[engine_name] = _curve_returns(curve)

    # BTC bar timestamps define the composition grid for the period.
    timestamps = sorted(sliced.prices["BTC"]["timestamp"].loc[sliced.prices["BTC"]["timestamp"] >= eval_start].tolist())
    curve = _compose_full_curve(
        latest_bar=eval_end,
        timestamps=timestamps,
        regime_map=regime_map,
        component_returns=component_returns,
        candidate=candidate,
    )
    return kind, label, _segment_metrics(curve, eval_start, eval_end)
|
||||
|
||||
|
||||
def _exact_validate_candidate_parallel(
    candidate: HybridCandidate,
    latest_bar: pd.Timestamp,
    *,
    max_workers: int,
) -> dict[str, object]:
    """Exactly validate one candidate by simulating each period in parallel.

    Every window/year period is farmed out to _exact_period_worker in its own
    process; results are reassembled in canonical WINDOWS/YEAR_PERIODS order,
    scored (excluding the partial YTD year), and returned as a summary row.
    """
    # Timestamps are stringified so the specs pickle cheaply across processes.
    period_specs = [
        (kind, label, str(start), str(end))
        for kind, label, start, end in _period_specs(latest_bar)
    ]
    window_results: dict[str, dict[str, float]] = {}
    year_results: dict[str, dict[str, float]] = {}
    ctx = mp.get_context("fork")
    with ProcessPoolExecutor(max_workers=min(max_workers, len(period_specs)), mp_context=ctx) as executor:
        future_map = {
            executor.submit(_exact_period_worker, asdict(candidate), period_spec): period_spec
            for period_spec in period_specs
        }
        for future in as_completed(future_map):
            kind, label, metrics = future.result()
            if kind == "window":
                window_results[label] = metrics
            else:
                year_results[label] = metrics
            print(
                f"[exact-period] {candidate.name} {label} "
                f"ret={metrics['total_return'] * 100:.2f}% "
                f"mdd={metrics['max_drawdown'] * 100:.2f}%",
                flush=True,
            )

    # Re-impose canonical ordering (as_completed yields in completion order).
    ordered_windows = {label: window_results[label] for _, label in WINDOWS}
    ordered_years = {label: year_results[label] for label, _, _ in YEAR_PERIODS}
    ordered_years["2026_YTD"] = year_results["2026_YTD"]
    score, negative_years, mdd_violations = _score_candidate(
        ordered_windows,
        {k: v for k, v in ordered_years.items() if k != "2026_YTD"},
    )
    return {
        "candidate": asdict(candidate),
        "name": candidate.name,
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "windows": ordered_windows,
        "years": ordered_years,
        "validation": "exact_independent_periods_parallel",
    }
|
||||
|
||||
|
||||
def main() -> None:
    """Fast hybrid search: approximate ranking, then parallel exact validation
    of the top N candidates, checkpointing the output after each candidate.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--top-n", type=int, default=3)
    parser.add_argument("--exact-workers", type=int, default=6)
    parser.add_argument("--out", type=str, default=str(FAST_OUT_JSON))
    args = parser.parse_args()

    _, latest_bar = load_fixed66_bundle("/tmp/strategy32_fixed66_bundle.pkl")
    profile_caches: dict[str, dict[str, object]] = {}
    # fork start method lets workers inherit loaded module state (POSIX only).
    ctx = mp.get_context("fork")
    with ProcessPoolExecutor(max_workers=min(3, len(STRATEGIC_REGIME_PROFILES)), mp_context=ctx) as executor:
        future_map = {
            executor.submit(_build_profile_cache, profile_name): profile_name
            for profile_name in STRATEGIC_REGIME_PROFILES
        }
        for future in as_completed(future_map):
            profile_name, cache = future.result()
            profile_caches[profile_name] = cache
            print(f"[cache] built {profile_name}", flush=True)

    rows = _build_candidate_rows(latest_bar, profile_caches)
    exact_top: list[dict[str, object]] = []
    for row in rows[: args.top_n]:
        candidate = HybridCandidate(**row["candidate"])
        print(f"[exact-start] {candidate.name}", flush=True)
        exact_top.append(
            _exact_validate_candidate_parallel(
                candidate,
                latest_bar,
                max_workers=args.exact_workers,
            )
        )
        # NOTE(review): the sort/payload/write below run once per candidate,
        # i.e. the output file is checkpointed after each exact validation —
        # confirm this loop-body extent against the original indentation.
        exact_top.sort(key=lambda item: (int(item["negative_years"]), int(item["mdd_violations"]), -float(item["score"])))
        payload = {
            "analysis": "strategy32_hybrid_strategy_search_fast",
            "latest_completed_bar": str(latest_bar),
            "candidate_count": len(rows),
            "summary": rows[:20],
            "exact_top": exact_top,
        }
        Path(args.out).write_text(json.dumps(payload, indent=2), encoding="utf-8")
        print(f"[exact-done] {candidate.name}", flush=True)

    # Leave a pointer at the canonical OUT_JSON path so readers of the slow
    # search's output can find the fast results file.
    Path(OUT_JSON).write_text(
        json.dumps(
            {
                "analysis": "strategy32_hybrid_strategy_search_fast_link",
                "source": str(Path(args.out)),
            },
            indent=2,
        ),
        encoding="utf-8",
    )
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
54
scripts/run_live_combo_backtest.py
Normal file
54
scripts/run_live_combo_backtest.py
Normal file
@@ -0,0 +1,54 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy32.live.runtime import BEST_CASH_OVERLAY, LIVE_STRATEGY_OVERRIDES
|
||||
from strategy32.research.soft_router import evaluate_cash_overlay_exact, load_component_bundle
|
||||
|
||||
|
||||
# Results destination and the shared research-bundle cache location.
OUT_JSON = Path("/tmp/strategy32_live_combo_backtest.json")
CACHE_PATH = "/tmp/strategy32_fixed66_bundle.pkl"
|
||||
|
||||
|
||||
def main() -> None:
    """Backtest the live combo (core filter + cash overlay) on the cached
    fixed66 bundle and persist the resulting payload to OUT_JSON.
    """
    # Single source of truth for the override set (previously the same dict
    # literal was written out twice).
    hard_filter_overrides = {
        **LIVE_STRATEGY_OVERRIDES,
        "hard_filter_refresh_cadence": "1d",
        "hard_filter_min_history_bars": 120,
        "hard_filter_lookback_bars": 30,
        "hard_filter_min_avg_dollar_volume": 50_000_000.0,
    }
    bundle, latest_bar = load_component_bundle(CACHE_PATH)
    payload = evaluate_cash_overlay_exact(
        bundle=bundle,
        latest_bar=latest_bar,
        candidate=BEST_CASH_OVERLAY,
        cache_path=CACHE_PATH,
        max_workers=6,
        # Pass a copy so the evaluated dict and the recorded basis stay
        # independent objects, matching the original's two literals.
        core_config_overrides=dict(hard_filter_overrides),
    )
    payload["backtest_basis"] = {
        "universe": "fixed66 cached bundle",
        "core_filter": "overheat_tolerant",
        "cash_overlay": payload["candidate"],
        "core_config_overrides": dict(hard_filter_overrides),
        "execution_refinement_note": "4h proxy in research bundle; live 1h refinement is not replayed here",
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print(json.dumps(payload, indent=2))
    print(f"[saved] {OUT_JSON}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
62
scripts/run_live_monitor.py
Normal file
62
scripts/run_live_monitor.py
Normal file
@@ -0,0 +1,62 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy32.live.env import load_dotenv
|
||||
from strategy32.live.runtime import run_monitor
|
||||
|
||||
|
||||
def _default_env_candidates() -> list[Path]:
|
||||
return [
|
||||
Path(__file__).resolve().parents[1] / ".env",
|
||||
Path("/Volumes/SSD/workspace/money-bot/strategy11/.env"),
|
||||
Path("/Volumes/SSD/workspace/money-bot/strategy7/engine_a_mm/.env"),
|
||||
Path("/Volumes/SSD/workspace/money-bot/strategy7/engine_aa_mm/.env"),
|
||||
]
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
    """Parse CLI options for the live monitor; env vars supply string defaults."""
    parser = argparse.ArgumentParser(description="Run Strategy32 live paper/advisory monitor")
    parser.add_argument("--once", action="store_true", help="Run one cycle and exit")
    string_options = {
        "--runtime-dir": os.getenv("STRATEGY32_RUNTIME_DIR", "runtime"),
        "--env-file": "",
        "--log-level": os.getenv("STRATEGY32_LOG_LEVEL", "INFO"),
    }
    for flag, default in string_options.items():
        parser.add_argument(flag, type=str, default=default)
    return parser.parse_args()
|
||||
|
||||
|
||||
def main() -> None:
    """Entry point: load environment variables, configure logging, and run the
    monitor loop (once or forever depending on --once)."""
    args = parse_args()
    if args.env_file:
        load_dotenv(args.env_file)
    else:
        # No explicit file given: load only the first existing candidate.
        for env_path in _default_env_candidates():
            if env_path.exists():
                load_dotenv(env_path)
                break

    runtime_dir = Path(args.runtime_dir)
    runtime_dir.mkdir(parents=True, exist_ok=True)
    # Mirror logs to stdout and to a file inside the runtime directory.
    handlers: list[logging.Handler] = [
        logging.StreamHandler(),
        logging.FileHandler(runtime_dir / "strategy32_live.log", encoding="utf-8"),
    ]
    logging.basicConfig(
        # Unknown level names fall back to INFO instead of raising.
        level=getattr(logging, args.log_level.upper(), logging.INFO),
        format="%(asctime)s %(levelname)-5s %(name)s %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        handlers=handlers,
    )
    asyncio.run(run_monitor(once=args.once, runtime_dir=args.runtime_dir))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
166
scripts/run_recent_core_filter_comparison.py
Normal file
166
scripts/run_recent_core_filter_comparison.py
Normal file
@@ -0,0 +1,166 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import multiprocessing as mp
|
||||
import sys
|
||||
from concurrent.futures import ProcessPoolExecutor, as_completed
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy32.live.runtime import BEST_CASH_OVERLAY, LIVE_STRATEGY_OVERRIDES
|
||||
from strategy32.research.soft_router import (
|
||||
build_cash_overlay_period_components,
|
||||
compose_cash_overlay_curve,
|
||||
segment_metrics,
|
||||
load_component_bundle,
|
||||
)
|
||||
|
||||
|
||||
# Results destination and the shared research-bundle cache location.
OUT_JSON = Path("/tmp/strategy32_recent_core_filter_comparison.json")
CACHE_PATH = "/tmp/strategy32_fixed66_bundle.pkl"
|
||||
|
||||
# Evaluation periods keyed by label; each lambda maps the latest completed bar
# to a (start, end) pair. The fixed calendar-year entries ignore `latest_bar`;
# "2026_YTD" stays open-ended up to the latest bar.
PERIODS = {
    "1y": lambda latest_bar: (latest_bar - pd.Timedelta(days=365), latest_bar),
    "2y": lambda latest_bar: (latest_bar - pd.Timedelta(days=730), latest_bar),
    "5y": lambda latest_bar: (latest_bar - pd.Timedelta(days=1825), latest_bar),
    "2024": lambda latest_bar: (pd.Timestamp("2024-01-01 00:00:00+00:00"), pd.Timestamp("2024-12-31 23:59:59+00:00")),
    "2025": lambda latest_bar: (pd.Timestamp("2025-01-01 00:00:00+00:00"), pd.Timestamp("2025-12-31 23:59:59+00:00")),
    "2026_YTD": lambda latest_bar: (pd.Timestamp("2026-01-01 00:00:00+00:00"), latest_bar),
}
|
||||
|
||||
# Core-filter variants to compare. Each entry names the core filter plus the
# config overrides applied on top of it; the four "hard_filter_*" keys are
# identical across variants so only the filter/momentum knobs differ.
VARIANTS: dict[str, dict[str, object]] = {
    # The live configuration as currently deployed.
    "current_overheat": {
        "core_filter": "overheat_tolerant",
        "overrides": {
            **LIVE_STRATEGY_OVERRIDES,
            "hard_filter_refresh_cadence": "1d",
            "hard_filter_min_history_bars": 120,
            "hard_filter_lookback_bars": 30,
            "hard_filter_min_avg_dollar_volume": 50_000_000.0,
        },
    },
    # Previous baseline with every fallback mechanism disabled.
    "prev_balanced": {
        "core_filter": "prev_balanced",
        "overrides": {
            "enable_liquidity_universe_fallback": False,
            "enable_momentum_filter_fallback": False,
            "enable_carry_score_fallback": False,
            "hard_filter_refresh_cadence": "1d",
            "hard_filter_min_history_bars": 120,
            "hard_filter_lookback_bars": 30,
            "hard_filter_min_avg_dollar_volume": 50_000_000.0,
        },
    },
    # Alternative filter, also with fallbacks disabled.
    "guarded_positive": {
        "core_filter": "guarded_positive",
        "overrides": {
            "enable_liquidity_universe_fallback": False,
            "enable_momentum_filter_fallback": False,
            "enable_carry_score_fallback": False,
            "hard_filter_refresh_cadence": "1d",
            "hard_filter_min_history_bars": 120,
            "hard_filter_lookback_bars": 30,
            "hard_filter_min_avg_dollar_volume": 50_000_000.0,
        },
    },
    # Live config with loosened momentum/universe thresholds.
    "relaxed_overheat": {
        "core_filter": "overheat_tolerant",
        "overrides": {
            **LIVE_STRATEGY_OVERRIDES,
            "momentum_min_score": 0.58,
            "momentum_min_relative_strength": -0.03,
            "momentum_min_7d_return": 0.00,
            "universe_min_avg_dollar_volume": 75_000_000.0,
            "hard_filter_refresh_cadence": "1d",
            "hard_filter_min_history_bars": 120,
            "hard_filter_lookback_bars": 30,
            "hard_filter_min_avg_dollar_volume": 50_000_000.0,
        },
    },
}
|
||||
|
||||
|
||||
def main() -> None:
    """Compare core-filter variants over fixed periods in worker processes.

    Builds one (variant, period) task per combination, runs them through a
    process pool, and writes the combined metrics to OUT_JSON.
    """
    bundle, latest_bar = load_component_bundle(CACHE_PATH)
    output: dict[str, object] = {
        "latest_bar": str(latest_bar),
        "candidate": {
            "regime_profile": BEST_CASH_OVERLAY.regime_profile,
            "cap_engine": BEST_CASH_OVERLAY.cap_engine,
            "chop_engine": BEST_CASH_OVERLAY.chop_engine,
            "dist_engine": BEST_CASH_OVERLAY.dist_engine,
        },
        "note": "fixed66 cached bundle, 4h proxy execution, same cash-overlay with different core filters",
        "variants": {},
    }

    # Pre-create a result slot per variant so workers can fill them in any order.
    for name, spec in VARIANTS.items():
        output["variants"][name] = {
            "core_filter": spec["core_filter"],
            "overrides": spec["overrides"],
            "results": {},
        }

    # One task per (variant, period) pair.
    tasks: list[tuple[str, str, pd.Timestamp, pd.Timestamp]] = []
    for variant_name in VARIANTS:
        for label, period_fn in PERIODS.items():
            start, end = period_fn(latest_bar)
            tasks.append((variant_name, label, start, end))

    # fork start method lets workers inherit module state (VARIANTS lookup).
    ctx = mp.get_context("fork")
    with ProcessPoolExecutor(max_workers=min(6, len(tasks)), mp_context=ctx) as executor:
        future_map = {
            executor.submit(
                _period_worker,
                CACHE_PATH,
                variant_name,
                label,
                # timestamps stringified so the submitted arguments pickle cleanly
                str(start),
                str(end),
            ): (variant_name, label)
            for variant_name, label, start, end in tasks
        }
        for future in as_completed(future_map):
            variant_name, label = future_map[future]
            print(f"[done] {variant_name} {label}", flush=True)
            result = future.result()
            output["variants"][variant_name]["results"][label] = result

    OUT_JSON.write_text(json.dumps(output, indent=2), encoding="utf-8")
    print(json.dumps(output, indent=2))
    print(f"[saved] {OUT_JSON}")
|
||||
|
||||
|
||||
def _period_worker(
    cache_path: str,
    variant_name: str,
    label: str,
    start_text: str,
    end_text: str,
) -> dict[str, float]:
    """Worker: backtest one variant over one period and return its metrics.

    Reloads the cached bundle in-process; `variant_name` is resolved against
    the module-level VARIANTS table inherited via the fork start method.
    `label` identifies the period for the caller but is unused here.
    """
    bundle, _latest_bar = load_component_bundle(cache_path)
    spec = VARIANTS[variant_name]
    start = pd.Timestamp(start_text)
    end = pd.Timestamp(end_text)
    components = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=start,
        eval_end=end,
        profile_name=BEST_CASH_OVERLAY.regime_profile,
        core_filter=str(spec["core_filter"]),
        cap_engine=BEST_CASH_OVERLAY.cap_engine,
        chop_engine=BEST_CASH_OVERLAY.chop_engine,
        dist_engine=BEST_CASH_OVERLAY.dist_engine,
        # copy so the worker cannot mutate the shared VARIANTS entry
        core_config_overrides=dict(spec["overrides"]),
    )
    curve, _weights = compose_cash_overlay_curve(candidate=BEST_CASH_OVERLAY, **components)
    return segment_metrics(curve, start, end)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
498
scripts/run_regime_filter_analysis.py
Normal file
498
scripts/run_regime_filter_analysis.py
Normal file
@@ -0,0 +1,498 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sys
|
||||
from dataclasses import asdict, dataclass
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy29.backtest.metrics import max_drawdown, sharpe_ratio
|
||||
from strategy29.backtest.window_analysis import slice_bundle
|
||||
from strategy29.common.constants import BTC_SYMBOL
|
||||
from strategy29.signal.btc_regime import BTCRegimeEngine
|
||||
from strategy32.backtest.simulator import Strategy32Backtester
|
||||
from strategy32.config import PROFILE_V7_DEFAULT, Strategy32Config, build_engine_config, build_strategy32_config
|
||||
from strategy32.data import build_strategy32_market_bundle
|
||||
|
||||
|
||||
# JSON results plus a markdown report destination (machine-specific absolute
# path; the folder name contains non-ASCII characters by design).
OUT_JSON = Path("/tmp/strategy32_regime_filter_analysis.json")
OUT_MD = Path("/Volumes/SSD/data/nextcloud/data/tara/files/📂HeadOffice/money-bot/strategy32/008_레짐별_필터적합도_분석.md")
|
||||
|
||||
|
||||
@dataclass(slots=True)
class FilterVariant:
    """One core-filter parameter set to be scored per regime.

    Instances are constructed positionally in FILTER_VARIANTS, so field order
    is part of the contract. Exact threshold semantics (units, comparison
    direction) live in the consuming code — verify there before relying on
    the field-name readings below.
    """

    key: str    # machine identifier used in result tables
    label: str  # human-readable display name
    liquidity_floor: float      # liquidity threshold (values suggest USD notional)
    avg_dollar_floor: float     # average dollar-volume threshold
    momentum_score: float       # momentum score threshold
    relative_strength: float    # relative-strength threshold
    ret7: float                 # 7-day return threshold (by name)
    corr_cap: float             # correlation ceiling (by name)
    carry_edge: float           # funding/carry edge threshold
|
||||
|
||||
@dataclass(frozen=True, slots=True)
class StrategicRegimeProfile:
    """Immutable threshold bundle consumed by build_strategic_regime_frame.

    Fields group into four regime families — panic, euphoria (euphoric
    breakout), expansion (momentum expansion) and distribution. Exact
    comparison semantics are defined by the frame builder, not here.
    """

    name: str
    # --- panic detection thresholds ---
    panic_atr: float
    panic_bar_return: float
    panic_breadth: float
    panic_funding: float
    # --- euphoria detection thresholds ---
    euphoria_daily_gap: float
    euphoria_intraday_gap: float
    euphoria_breadth: float
    euphoria_breadth_persist: float
    euphoria_positive_ratio: float
    euphoria_funding_persist: float
    euphoria_funding: float
    euphoria_btc_7d: float
    # --- expansion detection thresholds ---
    expansion_daily_gap: float
    expansion_intraday_gap: float
    expansion_breadth: float
    expansion_breadth_persist: float
    expansion_atr: float
    expansion_min_funding: float
    expansion_btc_7d: float
    # --- distribution detection thresholds ---
    distribution_daily_gap: float
    distribution_intraday_gap: float
    distribution_breadth: float
    distribution_positive_ratio: float
|
||||
|
||||
# Tuned threshold profiles for the strategic regime classifier. "base" is the
# reference set; the other two shift every threshold in one direction
# (presumably stricter vs. looser regime entry — confirm against the frame
# builder's comparison logic).
STRATEGIC_REGIME_PROFILES: dict[str, StrategicRegimeProfile] = {
    "base": StrategicRegimeProfile(
        name="base",
        panic_atr=0.05,
        panic_bar_return=-0.05,
        panic_breadth=0.22,
        panic_funding=-0.00005,
        euphoria_daily_gap=0.05,
        euphoria_intraday_gap=0.015,
        euphoria_breadth=0.68,
        euphoria_breadth_persist=0.62,
        euphoria_positive_ratio=0.72,
        euphoria_funding_persist=0.66,
        euphoria_funding=0.00012,
        euphoria_btc_7d=0.10,
        expansion_daily_gap=0.02,
        expansion_intraday_gap=0.00,
        expansion_breadth=0.55,
        expansion_breadth_persist=0.52,
        expansion_atr=0.05,
        expansion_min_funding=-0.00003,
        expansion_btc_7d=0.0,
        distribution_daily_gap=0.00,
        distribution_intraday_gap=0.00,
        distribution_breadth=0.45,
        distribution_positive_ratio=0.45,
    ),
    # Higher positive-regime bars than "base" (e.g. breadth 0.72 vs 0.68).
    "tight_positive": StrategicRegimeProfile(
        name="tight_positive",
        panic_atr=0.048,
        panic_bar_return=-0.048,
        panic_breadth=0.24,
        panic_funding=-0.00004,
        euphoria_daily_gap=0.055,
        euphoria_intraday_gap=0.018,
        euphoria_breadth=0.72,
        euphoria_breadth_persist=0.66,
        euphoria_positive_ratio=0.75,
        euphoria_funding_persist=0.70,
        euphoria_funding=0.00014,
        euphoria_btc_7d=0.12,
        expansion_daily_gap=0.028,
        expansion_intraday_gap=0.004,
        expansion_breadth=0.60,
        expansion_breadth_persist=0.57,
        expansion_atr=0.045,
        expansion_min_funding=0.0,
        expansion_btc_7d=0.02,
        distribution_daily_gap=0.005,
        distribution_intraday_gap=0.002,
        distribution_breadth=0.48,
        distribution_positive_ratio=0.48,
    ),
    # Lower positive-regime bars than "base" (e.g. breadth 0.64 vs 0.68).
    "loose_positive": StrategicRegimeProfile(
        name="loose_positive",
        panic_atr=0.052,
        panic_bar_return=-0.055,
        panic_breadth=0.20,
        panic_funding=-0.00006,
        euphoria_daily_gap=0.045,
        euphoria_intraday_gap=0.012,
        euphoria_breadth=0.64,
        euphoria_breadth_persist=0.58,
        euphoria_positive_ratio=0.68,
        euphoria_funding_persist=0.62,
        euphoria_funding=0.00010,
        euphoria_btc_7d=0.08,
        expansion_daily_gap=0.015,
        expansion_intraday_gap=-0.002,
        expansion_breadth=0.50,
        expansion_breadth_persist=0.48,
        expansion_atr=0.055,
        expansion_min_funding=-0.00005,
        expansion_btc_7d=-0.01,
        distribution_daily_gap=-0.005,
        distribution_intraday_gap=-0.004,
        distribution_breadth=0.42,
        distribution_positive_ratio=0.42,
    ),
}
|
||||
|
||||
|
||||
# Candidate core filters, constructed positionally:
# (key, label, liquidity_floor, avg_dollar_floor, momentum_score,
#  relative_strength, ret7, corr_cap, carry_edge)
FILTER_VARIANTS = [
    FilterVariant("prev_balanced", "Legacy Balanced", 50_000_000.0, 50_000_000.0, 0.60, 0.00, 0.00, 0.70, 0.0),
    FilterVariant("prev_profit", "Legacy Profit", 50_000_000.0, 50_000_000.0, 0.65, 0.00, 0.00, 0.78, 0.0),
    FilterVariant("new_default", "New Durable", 100_000_000.0, 50_000_000.0, 0.65, 0.00, 0.00, 0.70, 0.0),
    FilterVariant("high_liq_breakout", "High-Liq Breakout", 100_000_000.0, 75_000_000.0, 0.65, 0.00, 0.02, 0.78, 0.001),
    FilterVariant("overheat_tolerant", "Overheat Tolerant", 100_000_000.0, 100_000_000.0, 0.60, -0.02, 0.02, 0.78, 0.0),
    FilterVariant("ultra_selective", "Ultra Selective", 100_000_000.0, 50_000_000.0, 0.70, 0.02, 0.00, 0.78, 0.0),
]
|
||||
|
||||
|
||||
def _price_at(df: pd.DataFrame, timestamp: pd.Timestamp) -> float:
|
||||
row = df.loc[df["timestamp"] == timestamp]
|
||||
if row.empty:
|
||||
return 0.0
|
||||
return float(row["close"].iloc[-1])
|
||||
|
||||
|
||||
def _funding_row(df: pd.DataFrame, timestamp: pd.Timestamp) -> pd.Series | None:
|
||||
row = df.loc[df["timestamp"] == timestamp]
|
||||
if row.empty:
|
||||
return None
|
||||
return row.iloc[-1]
|
||||
|
||||
|
||||
def _daily_return(series: pd.Series, bars_back: int) -> pd.Series:
|
||||
return series / series.shift(bars_back) - 1.0
|
||||
|
||||
|
||||
def build_strategic_regime_frame(
    bundle,
    eval_start: pd.Timestamp,
    latest_bar: pd.Timestamp,
    profile: StrategicRegimeProfile | str = "base",
) -> pd.DataFrame:
    """Label every prepared BTC bar from ``eval_start`` with a strategic regime.

    For each bar, cross-sectional breadth/funding statistics are computed over
    the alt symbols in ``bundle`` together with trailing BTC returns, and the
    bar is classified into one of five regimes using the thresholds in
    ``profile``. Classification priority: CAPITULATION_STRESS >
    EUPHORIC_BREAKOUT > MOMENTUM_EXPANSION > DISTRIBUTION_DRIFT >
    CHOPPY_ROTATION.

    Args:
        bundle: market bundle exposing ``prices`` (symbol -> OHLC frame) and
            ``funding`` (symbol -> funding frame with ``funding_rate``/``basis``).
        eval_start: first timestamp kept in the returned frame.
        latest_bar: last completed bar. NOTE(review): currently unused in the
            body — kept for signature parity with callers; confirm it can go.
        profile: a StrategicRegimeProfile instance, or the name of one in
            STRATEGIC_REGIME_PROFILES.

    Returns:
        The prepared BTC frame restricted to ``eval_start`` onward, augmented
        with breadth/funding/return columns plus a ``strategic_regime`` label,
        with a fresh 0-based index.
    """
    if isinstance(profile, str):
        profile = STRATEGIC_REGIME_PROFILES[profile]
    btc_prices = bundle.prices[BTC_SYMBOL]
    prepared = BTCRegimeEngine(build_engine_config().regime).prepare(btc_prices)
    prepared = prepared.loc[prepared["timestamp"] >= eval_start].copy()
    timestamps = prepared["timestamp"].tolist()

    # Per-bar cross-sectional aggregates over the alt universe.
    breadths: list[float] = []
    mean_funding: list[float] = []
    positive_funding_ratio: list[float] = []
    mean_basis: list[float] = []
    btc_7d_return: list[float] = []
    btc_30d_return: list[float] = []

    for idx, ts in enumerate(timestamps):
        votes = []
        funding_vals = []
        basis_vals = []
        positive_votes = []
        for symbol, df in bundle.prices.items():
            if symbol == BTC_SYMBOL:
                continue
            # Breadth vote: is the alt above its 20-bar EMA? Requires at least
            # 10 of the trailing 30 bars to cast a vote.
            hist = df.loc[df["timestamp"] <= ts].tail(30)
            if len(hist) >= 10:
                ema = hist["close"].ewm(span=20, adjust=False).mean().iloc[-1]
                votes.append(float(hist["close"].iloc[-1] > ema))
            f_df = bundle.funding.get(symbol)
            if f_df is None:
                continue
            # Funding statistics over the last 6 funding rows at or before ts.
            f_hist = f_df.loc[f_df["timestamp"] <= ts].tail(6)
            if f_hist.empty:
                continue
            funding_vals.append(float(f_hist["funding_rate"].mean()))
            basis_vals.append(float(f_hist["basis"].iloc[-1]))
            positive_votes.append(float((f_hist["funding_rate"] > 0).mean()))
        # Neutral defaults (0.5 breadth/ratio, 0.0 funding/basis) when no alt
        # qualifies for a given bar.
        breadths.append(sum(votes) / len(votes) if votes else 0.5)
        mean_funding.append(sum(funding_vals) / len(funding_vals) if funding_vals else 0.0)
        mean_basis.append(sum(basis_vals) / len(basis_vals) if basis_vals else 0.0)
        positive_funding_ratio.append(sum(positive_votes) / len(positive_votes) if positive_votes else 0.5)

        # Trailing BTC returns over 42 / 180 bars — presumably 7 / 30 days at a
        # 4h timeframe (6 bars/day); TODO confirm against the configured bar size.
        if idx >= 42:
            btc_7d_return.append(float(prepared["close"].iloc[idx] / prepared["close"].iloc[idx - 42] - 1.0))
        else:
            btc_7d_return.append(0.0)
        if idx >= 180:
            btc_30d_return.append(float(prepared["close"].iloc[idx] / prepared["close"].iloc[idx - 180] - 1.0))
        else:
            btc_30d_return.append(0.0)

    prepared["breadth"] = breadths
    prepared["mean_alt_funding"] = mean_funding
    prepared["mean_alt_basis"] = mean_basis
    prepared["positive_funding_ratio"] = positive_funding_ratio
    prepared["btc_7d_return"] = btc_7d_return
    prepared["btc_30d_return"] = btc_30d_return
    # Fractional gap of price above/below its slow EMA (daily and intraday).
    prepared["daily_trend_gap"] = prepared["daily_close"] / prepared["daily_ema_slow"] - 1.0
    prepared["intraday_trend_gap"] = prepared["close"] / prepared["ema_slow"] - 1.0
    # 18-bar rolling means smooth breadth/funding into "persistence" signals.
    prepared["breadth_persist"] = prepared["breadth"].rolling(18, min_periods=6).mean()
    prepared["funding_persist"] = prepared["positive_funding_ratio"].rolling(18, min_periods=6).mean()

    regimes: list[str] = []
    for row in prepared.itertuples(index=False):
        breadth = float(row.breadth)
        # NaN-safe reads: fall back to the unsmoothed value or 0.0.
        breadth_persist = float(row.breadth_persist) if pd.notna(row.breadth_persist) else breadth
        atr = float(row.atr_pct) if pd.notna(row.atr_pct) else 0.0
        bar_ret = float(row.bar_return) if pd.notna(row.bar_return) else 0.0
        daily_gap = float(row.daily_trend_gap) if pd.notna(row.daily_trend_gap) else 0.0
        intra_gap = float(row.intraday_trend_gap) if pd.notna(row.intraday_trend_gap) else 0.0
        avg_funding = float(row.mean_alt_funding)
        positive_ratio = float(row.positive_funding_ratio)
        funding_persist = float(row.funding_persist) if pd.notna(row.funding_persist) else positive_ratio
        btc_7d = float(row.btc_7d_return)

        # Panic: extreme ATR, a crash bar, or broad weakness with negative funding.
        panic = (
            atr >= profile.panic_atr
            or bar_ret <= profile.panic_bar_return
            or (breadth <= profile.panic_breadth and avg_funding < profile.panic_funding)
        )
        # Euphoria: stretched trend with persistent breadth and hot funding
        # (or a large trailing BTC move).
        euphoria = (
            daily_gap > profile.euphoria_daily_gap
            and intra_gap > profile.euphoria_intraday_gap
            and breadth >= profile.euphoria_breadth
            and breadth_persist >= profile.euphoria_breadth_persist
            and positive_ratio >= profile.euphoria_positive_ratio
            and funding_persist >= profile.euphoria_funding_persist
            and (avg_funding >= profile.euphoria_funding or btc_7d >= profile.euphoria_btc_7d)
        )
        # Expansion: healthy up-trend with contained volatility; explicitly
        # excludes bars that already qualify as euphoria.
        expansion = (
            daily_gap > profile.expansion_daily_gap
            and intra_gap > profile.expansion_intraday_gap
            and breadth >= profile.expansion_breadth
            and breadth_persist >= profile.expansion_breadth_persist
            and atr < profile.expansion_atr
            and avg_funding > profile.expansion_min_funding
            and btc_7d > profile.expansion_btc_7d
            and not euphoria
        )
        # Distribution: fading trend, shrinking breadth, or cooling funding.
        distribution = (
            (daily_gap < profile.distribution_daily_gap and breadth < profile.distribution_breadth)
            or (intra_gap < profile.distribution_intraday_gap and breadth < max(profile.distribution_breadth - 0.07, 0.0))
            or (avg_funding < 0.0 and positive_ratio < profile.distribution_positive_ratio and breadth < profile.distribution_breadth)
        )

        if panic:
            regimes.append("CAPITULATION_STRESS")
        elif euphoria:
            regimes.append("EUPHORIC_BREAKOUT")
        elif expansion:
            regimes.append("MOMENTUM_EXPANSION")
        elif distribution:
            regimes.append("DISTRIBUTION_DRIFT")
        else:
            regimes.append("CHOPPY_ROTATION")

    prepared["strategic_regime"] = regimes
    return prepared.reset_index(drop=True)
|
||||
|
||||
|
||||
def build_variant_config(variant: FilterVariant) -> Strategy32Config:
    """Materialise a Strategy32Config with the variant's filter thresholds applied."""
    cfg = build_strategy32_config(PROFILE_V7_DEFAULT)
    # Map each variant field onto its config attribute, then apply in one pass.
    overrides = {
        "discovery_min_quote_volume_24h": variant.liquidity_floor,
        "universe_min_avg_dollar_volume": variant.avg_dollar_floor,
        "momentum_min_score": variant.momentum_score,
        "momentum_min_relative_strength": variant.relative_strength,
        "momentum_min_7d_return": variant.ret7,
        "max_pairwise_correlation": variant.corr_cap,
        "carry_min_expected_edge": variant.carry_edge,
    }
    for attr, value in overrides.items():
        setattr(cfg, attr, value)
    return cfg
|
||||
|
||||
|
||||
def regime_metrics_from_equity(curve: pd.Series, regime_frame: pd.DataFrame, bars_per_day: int) -> dict[str, dict[str, float]]:
    """Split the equity curve by strategic regime and summarise each slice.

    Bars without a matching equity return are treated as flat (0.0 return).
    """
    bar_returns = curve.pct_change().fillna(0.0).rename("equity_bar_return")
    joined = regime_frame.merge(
        bar_returns.rename_axis("timestamp").reset_index(),
        on="timestamp",
        how="left",
    ).fillna({"equity_bar_return": 0.0})
    total_bars = len(joined)

    summary: dict[str, dict[str, float]] = {}
    for regime, slice_df in joined.groupby("strategic_regime"):
        # Rebuild a per-regime equity curve from that regime's bar returns only.
        regime_curve = (1.0 + slice_df["equity_bar_return"]).cumprod()
        regime_curve.index = pd.Index(slice_df["timestamp"], name="timestamp")
        bars = len(slice_df)
        if regime_curve.empty:
            regime_total = 0.0
        else:
            regime_total = float(regime_curve.iloc[-1] - 1.0)
        summary[str(regime)] = {
            "bars": int(bars),
            "bar_share": float(bars / total_bars) if total_bars else 0.0,
            "total_return": regime_total,
            "sharpe": sharpe_ratio(regime_curve, bars_per_day),
            "max_drawdown": max_drawdown(regime_curve),
            "positive_bar_ratio": float((slice_df["equity_bar_return"] > 0).mean()) if bars else 0.0,
        }
    return summary
|
||||
|
||||
|
||||
def regime_score(metrics: dict[str, float]) -> float:
    """Composite fitness score for a filter within one regime.

    Weighted blend of drawdown-adjusted return (Calmar-like, drawdown floored
    at 1% to avoid division blow-ups), Sharpe, raw return and bar coverage.
    """
    ret = float(metrics["total_return"])
    drawdown = abs(float(metrics["max_drawdown"]))
    calmar_like = ret / max(drawdown, 0.01)
    return (
        1.8 * calmar_like
        + 0.8 * float(metrics["sharpe"])
        + 0.25 * ret
        + 0.15 * float(metrics["bar_share"])
    )
|
||||
|
||||
|
||||
def main() -> None:
    """Run the regime/filter-fit analysis and write JSON + Korean markdown reports.

    Pipeline: build a 5y (+warmup) market bundle, label each bar with a
    strategic regime, backtest every FILTER_VARIANTS candidate over the same
    window, score each variant per regime, then persist a machine-readable
    payload (OUT_JSON) and a human-readable report (OUT_MD).
    """
    base = build_strategy32_config(PROFILE_V7_DEFAULT)
    end = pd.Timestamp("2026-03-15 00:00:00", tz="UTC")
    # 1825 days = 5 years of evaluation, plus warmup and a small fetch margin.
    start = end - pd.Timedelta(days=1825 + base.warmup_days + 14)
    # Discover with the loosest liquidity floor so every variant can filter
    # down from the same superset of symbols.
    bundle, latest_bar, accepted, rejected, quote_by_symbol = build_strategy32_market_bundle(
        symbols=base.symbols,
        auto_discover_symbols=True,
        quote_assets=base.quote_assets,
        excluded_base_assets=base.excluded_base_assets,
        min_quote_volume_24h=min(variant.liquidity_floor for variant in FILTER_VARIANTS),
        start=start,
        end=end,
        timeframe=base.timeframe,
        max_staleness_days=base.max_symbol_staleness_days,
    )
    eval_start = latest_bar - pd.Timedelta(days=1825)
    raw_start = eval_start - pd.Timedelta(days=base.warmup_days)
    sliced = slice_bundle(bundle, raw_start, latest_bar)
    regime_frame = build_strategic_regime_frame(sliced, eval_start, latest_bar)

    # Backtest each variant and collect total plus per-regime metrics.
    variant_rows = []
    regime_rankings: dict[str, list[dict[str, object]]] = {}
    for variant in FILTER_VARIANTS:
        cfg = build_variant_config(variant)
        backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
        backtester.engine_config.initial_capital = 1000.0
        result = backtester.run()
        # Align the equity curve to the regime frame's first bar.
        curve = result.equity_curve.loc[result.equity_curve.index >= regime_frame["timestamp"].iloc[0]]
        metrics = regime_metrics_from_equity(curve, regime_frame, backtester.engine_config.bars_per_day)
        total = {
            "total_return": float(result.total_return),
            "cagr": float(result.cagr),
            "sharpe": float(result.sharpe),
            "max_drawdown": float(result.max_drawdown),
            "total_trades": int(result.total_trades),
        }
        variant_rows.append(
            {
                "variant": asdict(variant),
                "total": total,
                "regimes": metrics,
            }
        )
        for regime_name, regime_metrics in metrics.items():
            regime_rankings.setdefault(regime_name, []).append(
                {
                    "variant_key": variant.key,
                    "variant_label": variant.label,
                    "score": regime_score(regime_metrics),
                    "metrics": regime_metrics,
                    "total": total,
                }
            )

    # Best variant first within each regime.
    for regime_name, rows in regime_rankings.items():
        rows.sort(key=lambda row: float(row["score"]), reverse=True)

    payload = {
        "strategy": "strategy32",
        "analysis": "regime_filter_fit",
        "profile": PROFILE_V7_DEFAULT,
        "latest_completed_bar": str(latest_bar),
        "accepted_symbols": accepted,
        "rejected_symbols": rejected,
        "quote_by_symbol": quote_by_symbol,
        "regime_distribution": (
            regime_frame["strategic_regime"]
            .value_counts(normalize=False)
            .sort_index()
            .rename_axis("regime")
            .reset_index(name="bars")
            .to_dict(orient="records")
        ),
        "variant_rows": variant_rows,
        "regime_rankings": regime_rankings,
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")

    # Markdown report (Korean): purpose, regime taxonomy, candidate list.
    lines = [
        "# Strategy32 레짐별 필터 적합도 분석",
        "",
        "## 1. 목적",
        "",
        "유니버스 필터를 단일 기본값으로 고정하지 않고, 전략적으로 나눈 `5개 레짐`마다 어떤 필터가 더 잘 작동하는지 본다.",
        "",
        "## 2. 전략형 5개 레짐",
        "",
        "- `MOMENTUM_EXPANSION`: 장기/단기 추세가 위이고 breadth가 넓게 살아있는 구간",
        "- `EUPHORIC_BREAKOUT`: breadth와 funding이 과열된 추세 확장 구간",
        "- `CHOPPY_ROTATION`: 뚜렷한 추세가 없고 자금이 순환하는 박스권",
        "- `DISTRIBUTION_DRIFT`: breadth와 funding이 식으며 약세로 기우는 구간",
        "- `CAPITULATION_STRESS`: 고변동/급락/광범위 붕괴 구간",
        "",
        "## 3. 비교한 필터 후보",
        "",
    ]
    for variant in FILTER_VARIANTS:
        lines.append(
            f"- `{variant.key}`: liq `${variant.liquidity_floor/1_000_000:.0f}M`, "
            f"avg `${variant.avg_dollar_floor/1_000_000:.0f}M`, "
            f"score `{variant.momentum_score:.2f}`, rs `{variant.relative_strength:.2f}`, "
            f"7d `{variant.ret7:.2f}`, corr `{variant.corr_cap:.2f}`"
        )

    # Section 4: how many bars fell into each regime.
    lines.extend(["", "## 4. 레짐 분포", ""])
    dist_frame = pd.DataFrame(payload["regime_distribution"])
    if not dist_frame.empty:
        total_bars = int(dist_frame["bars"].sum())
        lines.append("| 레짐 | bars | 비중 |")
        lines.append("|---|---:|---:|")
        for row in dist_frame.itertuples(index=False):
            lines.append(f"| `{row.regime}` | `{row.bars}` | `{row.bars / total_bars:.1%}` |")

    # Section 5: the top-ranked filter per regime.
    lines.extend(["", "## 5. 레짐별 1위 필터", ""])
    for regime_name in sorted(regime_rankings):
        best = regime_rankings[regime_name][0]
        metrics = best["metrics"]
        lines.append(f"### {regime_name}")
        lines.append("")
        lines.append(f"- Best: `{best['variant_key']}` ({best['variant_label']})")
        lines.append(f"- Regime return: `{metrics['total_return'] * 100:.2f}%`")
        lines.append(f"- Regime MDD: `{metrics['max_drawdown'] * 100:.2f}%`")
        lines.append(f"- Regime Sharpe: `{metrics['sharpe']:.2f}`")
        lines.append(f"- Positive bar ratio: `{metrics['positive_bar_ratio']:.2%}`")
        lines.append("")

    # Section 6: full-window results per variant, sorted by CAGR.
    lines.extend(["## 6. 필터별 전체 5y 결과", "", "| 필터 | 5y 수익률 | CAGR | MDD | Sharpe | 거래수 |", "|---|---:|---:|---:|---:|---:|"])
    for row in sorted(variant_rows, key=lambda item: float(item["total"]["cagr"]), reverse=True):
        total = row["total"]
        lines.append(
            f"| `{row['variant']['key']}` | `{total['total_return'] * 100:.2f}%` | `{total['cagr'] * 100:.2f}%` | "
            f"`{total['max_drawdown'] * 100:.2f}%` | `{total['sharpe']:.2f}` | `{total['total_trades']}` |"
        )

    # Section 7: top-two candidates per regime, one summary line each.
    lines.extend(["", "## 7. 해석", ""])
    for regime_name in sorted(regime_rankings):
        top_two = regime_rankings[regime_name][:2]
        summary = ", ".join(
            f"`{row['variant_key']}` ({row['metrics']['total_return'] * 100:.1f}%, MDD {row['metrics']['max_drawdown'] * 100:.1f}%)"
            for row in top_two
        )
        lines.append(f"- `{regime_name}`: 상위 후보는 {summary}")

    lines.extend(
        [
            "",
            "## 8. 원본 결과",
            "",
            f"- JSON: [{OUT_JSON}]({OUT_JSON})",
        ]
    )
    OUT_MD.write_text("\n".join(lines) + "\n", encoding="utf-8")
    print(f"wrote {OUT_JSON}")
    print(f"wrote {OUT_MD}")
|
||||
296
scripts/run_relaxed_macro_scaling_search.py
Normal file
296
scripts/run_relaxed_macro_scaling_search.py
Normal file
@@ -0,0 +1,296 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import asdict
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy29.backtest.window_analysis import slice_bundle
|
||||
from strategy32.live.runtime import BEST_CASH_OVERLAY, LIVE_STRATEGY_OVERRIDES
|
||||
from strategy32.research.soft_router import (
|
||||
MacroScaleSpec,
|
||||
build_cash_overlay_period_components,
|
||||
compose_cash_overlay_curve,
|
||||
load_component_bundle,
|
||||
score_candidate,
|
||||
segment_metrics,
|
||||
)
|
||||
|
||||
|
||||
# Pickled market bundle shared across research scripts, and this run's output.
CACHE_PATH = "/tmp/strategy32_fixed66_bundle.pkl"
OUT_JSON = Path("/tmp/strategy32_relaxed_macro_scaling_search.json")

# Live overrides with additional universe/momentum thresholds — presumably
# loosened relative to LIVE_STRATEGY_OVERRIDES (confirm against that dict) —
# on top of shared daily hard-filter settings.
RELAXED_OVERHEAT_OVERRIDES = {
    **LIVE_STRATEGY_OVERRIDES,
    "momentum_min_score": 0.58,
    "momentum_min_relative_strength": -0.03,
    "momentum_min_7d_return": 0.00,
    "universe_min_avg_dollar_volume": 75_000_000.0,
    "hard_filter_refresh_cadence": "1d",
    "hard_filter_min_history_bars": 120,
    "hard_filter_lookback_bars": 30,
    "hard_filter_min_avg_dollar_volume": 50_000_000.0,
}

# Baseline: live overrides plus only the hard-filter settings, leaving the
# momentum/universe gates exactly as LIVE_STRATEGY_OVERRIDES defines them.
CURRENT_OVERHEAT_OVERRIDES = {
    **LIVE_STRATEGY_OVERRIDES,
    "hard_filter_refresh_cadence": "1d",
    "hard_filter_min_history_bars": 120,
    "hard_filter_lookback_bars": 30,
    "hard_filter_min_avg_dollar_volume": 50_000_000.0,
}

# Rolling lookback windows evaluated for every candidate: (days, label).
WINDOWS = (
    (365, "1y"),
    (730, "2y"),
    (1095, "3y"),
    (1460, "4y"),
    (1825, "5y"),
)

# Calendar-year evaluation periods: (label, start, exclusive end). 2021 starts
# mid-March rather than Jan 1 — presumably the earliest available data; confirm.
YEAR_PERIODS = (
    ("2021", pd.Timestamp("2021-03-16 04:00:00+00:00"), pd.Timestamp("2022-01-01 00:00:00+00:00")),
    ("2022", pd.Timestamp("2022-01-01 00:00:00+00:00"), pd.Timestamp("2023-01-01 00:00:00+00:00")),
    ("2023", pd.Timestamp("2023-01-01 00:00:00+00:00"), pd.Timestamp("2024-01-01 00:00:00+00:00")),
    ("2024", pd.Timestamp("2024-01-01 00:00:00+00:00"), pd.Timestamp("2025-01-01 00:00:00+00:00")),
    ("2025", pd.Timestamp("2025-01-01 00:00:00+00:00"), pd.Timestamp("2026-01-01 00:00:00+00:00")),
)
# Start of the year-to-date segment reported alongside the full years.
YTD_START = pd.Timestamp("2026-01-01 00:00:00+00:00")
|
||||
|
||||
|
||||
def _clip01(value: float) -> float:
|
||||
return min(max(float(value), 0.0), 1.0)
|
||||
|
||||
|
||||
def _ramp(value: float, start: float, end: float) -> float:
|
||||
if end == start:
|
||||
return 1.0 if value >= end else 0.0
|
||||
if value <= start:
|
||||
return 0.0
|
||||
if value >= end:
|
||||
return 1.0
|
||||
return (value - start) / (end - start)
|
||||
|
||||
|
||||
def _build_macro_scale_map(sliced_bundle, *, timestamps: list[pd.Timestamp], spec: MacroScaleSpec) -> pd.Series:
    """Derive a per-bar exposure scale in [spec.floor, 1.0] from BTC weekly trend gaps.

    Weekly closes (last daily close per W-SUN week) are compared against their
    slow EMA, both directly and via a fast EMA; each gap is mapped through a
    linear ramp, blended by ``spec.close_weight`` and rescaled onto
    [floor, 1], then forward-filled onto the requested bar timestamps.
    """
    weekly_close = (
        sliced_bundle.prices["BTC"]
        .set_index("timestamp")["close"]
        .astype(float)
        .sort_index()
        .resample("1D")
        .last()
        .dropna()
        .resample("W-SUN")
        .last()
        .dropna()
    )
    fast_ema = weekly_close.ewm(span=spec.fast_weeks, adjust=False).mean()
    slow_ema = weekly_close.ewm(span=spec.slow_weeks, adjust=False).mean()

    def _gap_ramp(gaps: pd.Series, lo: float, hi: float) -> pd.Series:
        # Map each fractional gap through the shared linear ramp.
        return gaps.apply(lambda g: _ramp(float(g), lo, hi))

    close_component = _gap_ramp(weekly_close / slow_ema - 1.0, spec.close_gap_start, spec.close_gap_full)
    fast_component = _gap_ramp(fast_ema / slow_ema - 1.0, spec.fast_gap_start, spec.fast_gap_full)
    blended = spec.close_weight * close_component + (1.0 - spec.close_weight) * fast_component
    weekly_scale = spec.floor + (1.0 - spec.floor) * blended.clip(0.0, 1.0)
    # Align weekly values onto the bar grid; bars before the first weekly
    # observation default to full exposure (1.0).
    per_bar = weekly_scale.reindex(pd.DatetimeIndex(timestamps, name="timestamp"), method="ffill")
    return per_bar.fillna(1.0).clip(spec.floor, 1.0).astype(float)
|
||||
|
||||
|
||||
def _candidate_specs() -> list[MacroScaleSpec]:
    """Enumerate the macro-scale parameter grid searched by this script.

    Iteration order matches the nested grids below, slowest-varying first.
    """
    floors = (0.25, 0.35, 0.45)
    close_gaps = ((-0.08, 0.02), (-0.06, 0.02), (-0.05, 0.04))
    fast_gaps = ((-0.04, 0.01), (-0.03, 0.02))
    close_weights = (0.55, 0.65)

    grid: list[MacroScaleSpec] = []
    for floor in floors:
        for close_gap_start, close_gap_full in close_gaps:
            for fast_gap_start, fast_gap_full in fast_gaps:
                grid.extend(
                    MacroScaleSpec(
                        floor=floor,
                        close_gap_start=close_gap_start,
                        close_gap_full=close_gap_full,
                        fast_gap_start=fast_gap_start,
                        fast_gap_full=fast_gap_full,
                        close_weight=close_weight,
                    )
                    for close_weight in close_weights
                )
    return grid
|
||||
|
||||
|
||||
def _collect_metrics(curve: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
    """Score one candidate curve over the rolling windows and calendar years.

    Returns (window_metrics, year_metrics, score, negative_years,
    mdd_violations); the YTD segment is reported but excluded from scoring.
    """
    window_results = {
        label: segment_metrics(curve, latest_bar - pd.Timedelta(days=days), latest_bar)
        for days, label in WINDOWS
    }
    year_results = {
        label: segment_metrics(curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
        for label, start, end_exclusive in YEAR_PERIODS
    }
    year_results["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)

    score, negative_years, mdd_violations = score_candidate(
        {label: window_results[label] for _, label in WINDOWS},
        {label: year_results[label] for label, _, _ in YEAR_PERIODS},
    )
    return window_results, year_results, score, negative_years, mdd_violations
|
||||
|
||||
|
||||
def _evaluate_exact_sequential(
    bundle,
    latest_bar: pd.Timestamp,
    *,
    core_overrides: dict[str, object],
    macro_scale_spec: MacroScaleSpec | None,
) -> dict[str, object]:
    """Exactly evaluate the best cash-overlay candidate period by period.

    Unlike the fast search path (which reuses one precomputed component set),
    this rebuilds the overlay components independently for every rolling
    window, calendar year and the YTD segment, composes the curve per period,
    and scores the collected metrics.

    Args:
        bundle: full market bundle used to build period components.
        latest_bar: last completed bar; caps every period end.
        core_overrides: strategy-config overrides applied to the core engine.
        macro_scale_spec: optional macro scaling spec; None disables scaling.

    Returns:
        A JSON-serialisable dict with the candidate, overrides, score,
        per-window and per-year metrics, and the latest YTD weight row.
    """
    window_results: dict[str, dict[str, float]] = {}
    year_results: dict[str, dict[str, float]] = {}

    # All evaluation periods as (kind, label, start, end); year ends are
    # clamped to latest_bar and made inclusive via the 1-second subtraction.
    periods = [
        *(("window", label, latest_bar - pd.Timedelta(days=days), latest_bar) for days, label in WINDOWS),
        *(("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))) for label, start, end_exclusive in YEAR_PERIODS),
        ("year", "2026_YTD", YTD_START, latest_bar),
    ]

    latest_weights: list[dict[str, object]] = []
    for kind, label, start, end in periods:
        components = build_cash_overlay_period_components(
            bundle=bundle,
            eval_start=start,
            eval_end=end,
            profile_name=BEST_CASH_OVERLAY.regime_profile,
            core_filter=BEST_CASH_OVERLAY.core_filter,
            cap_engine=BEST_CASH_OVERLAY.cap_engine,
            chop_engine=BEST_CASH_OVERLAY.chop_engine,
            dist_engine=BEST_CASH_OVERLAY.dist_engine,
            core_config_overrides=core_overrides,
            macro_scale_spec=macro_scale_spec,
        )
        curve, weights = compose_cash_overlay_curve(candidate=BEST_CASH_OVERLAY, **components)
        metrics = segment_metrics(curve, start, end)
        if kind == "window":
            window_results[label] = metrics
        else:
            year_results[label] = metrics
            # Keep the final YTD weight row as a snapshot of current allocation.
            if label == "2026_YTD":
                latest_weights = weights.tail(1).assign(timestamp=lambda df: df["timestamp"].astype(str)).to_dict(orient="records")

    # Score on the rolling windows and the full calendar years (YTD excluded).
    score, negative_years, mdd_violations = score_candidate(
        {label: window_results[label] for _, label in WINDOWS},
        {label: year_results[label] for label, _, _ in YEAR_PERIODS},
    )
    return {
        "candidate": asdict(BEST_CASH_OVERLAY),
        "core_overrides": core_overrides,
        "macro_scale_spec": asdict(macro_scale_spec) if macro_scale_spec is not None else None,
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "windows": window_results,
        "years": year_results,
        "latest_weights": latest_weights,
        "validation": "exact_independent_periods_cash_overlay_sequential",
    }
|
||||
|
||||
|
||||
def main() -> None:
    """Search macro-scaling specs on a relaxed-overheat core and validate exactly.

    Phase 1 builds one set of cash-overlay components with the relaxed
    overrides and sweeps every candidate MacroScaleSpec against it (fast,
    approximate). Phase 2 (skipped when STRATEGY32_SEARCH_ONLY is truthy)
    re-evaluates the current/relaxed baselines and the best spec with exact
    per-period component rebuilds, then writes everything to OUT_JSON.
    """
    bundle, latest_bar = load_component_bundle(CACHE_PATH)
    eval_start = latest_bar - pd.Timedelta(days=1825)
    # Extra year of history so weekly EMAs are warm at eval_start.
    sliced = slice_bundle(bundle, eval_start - pd.Timedelta(days=365), latest_bar)
    print("[phase] build relaxed core components", flush=True)

    # One shared component set reused for every spec in the search loop.
    relaxed_components = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=eval_start,
        eval_end=latest_bar,
        profile_name=BEST_CASH_OVERLAY.regime_profile,
        core_filter=BEST_CASH_OVERLAY.core_filter,
        cap_engine=BEST_CASH_OVERLAY.cap_engine,
        chop_engine=BEST_CASH_OVERLAY.chop_engine,
        dist_engine=BEST_CASH_OVERLAY.dist_engine,
        core_config_overrides=RELAXED_OVERHEAT_OVERRIDES,
    )
    print("[phase] search macro specs", flush=True)

    search_rows: list[dict[str, object]] = []
    specs = _candidate_specs()
    for idx, spec in enumerate(specs, start=1):
        # The scale map covers decision bars only, hence timestamps[:-1].
        macro_scale_map = _build_macro_scale_map(sliced, timestamps=relaxed_components["timestamps"][:-1], spec=spec)
        curve, _weights = compose_cash_overlay_curve(
            candidate=BEST_CASH_OVERLAY,
            timestamps=relaxed_components["timestamps"],
            score_frame=relaxed_components["score_frame"],
            core_returns=relaxed_components["core_returns"],
            core_exposure_frame=relaxed_components["core_exposure_frame"],
            cap_returns=relaxed_components["cap_returns"],
            chop_returns=relaxed_components["chop_returns"],
            dist_returns=relaxed_components["dist_returns"],
            macro_scale_map=macro_scale_map,
        )
        windows, years, score, negative_years, mdd_violations = _collect_metrics(curve, latest_bar)
        search_rows.append(
            {
                "macro_scale_spec": asdict(spec),
                "windows": windows,
                "years": years,
                "score": score,
                "negative_years": negative_years,
                "mdd_violations": mdd_violations,
            }
        )
        # Progress heartbeat every 6 specs and at completion.
        if idx % 6 == 0 or idx == len(specs):
            print(f"[search] {idx}/{len(specs)}", flush=True)

    search_rows.sort(key=lambda row: float(row["score"]), reverse=True)
    top_search = search_rows[:5]
    # Env escape hatch: emit only the (fast) search results and stop.
    search_only = os.getenv("STRATEGY32_SEARCH_ONLY", "").strip().lower() in {"1", "true", "yes", "on"}
    if search_only:
        payload = {
            "analysis": "relaxed_overheat_macro_scaling_search",
            "mode": "search_only",
            "latest_bar": str(latest_bar),
            "core_filter": "relaxed_overheat",
            "candidate": asdict(BEST_CASH_OVERLAY),
            "search_top": top_search,
        }
        OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
        print(json.dumps(payload, indent=2))
        print(f"[saved] {OUT_JSON}")
        return
    print("[phase] exact baselines", flush=True)

    # Exact (per-period) reference runs without macro scaling.
    baselines = {
        "current_overheat": _evaluate_exact_sequential(
            bundle,
            latest_bar,
            core_overrides=CURRENT_OVERHEAT_OVERRIDES,
            macro_scale_spec=None,
        ),
        "relaxed_overheat": _evaluate_exact_sequential(
            bundle,
            latest_bar,
            core_overrides=RELAXED_OVERHEAT_OVERRIDES,
            macro_scale_spec=None,
        ),
    }

    # Re-validate the search winner with the exact sequential evaluation.
    best_spec = MacroScaleSpec(**top_search[0]["macro_scale_spec"])
    print(f"[phase] exact best spec {best_spec.name}", flush=True)
    best_exact = _evaluate_exact_sequential(
        bundle,
        latest_bar,
        core_overrides=RELAXED_OVERHEAT_OVERRIDES,
        macro_scale_spec=best_spec,
    )

    payload = {
        "analysis": "relaxed_overheat_macro_scaling_search",
        "latest_bar": str(latest_bar),
        "core_filter": "relaxed_overheat",
        "candidate": asdict(BEST_CASH_OVERLAY),
        "baselines": baselines,
        "search_top": top_search,
        "best_exact": best_exact,
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print(json.dumps(payload, indent=2))
    print(f"[saved] {OUT_JSON}")
|
||||
319
scripts/run_soft_router_search.py
Normal file
319
scripts/run_soft_router_search.py
Normal file
@@ -0,0 +1,319 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import itertools
|
||||
import json
|
||||
import sys
|
||||
from dataclasses import asdict
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy29.backtest.window_analysis import slice_bundle
|
||||
from strategy29.backtest.metrics import max_drawdown, sharpe_ratio
|
||||
from strategy32.backtest.simulator import Strategy32Backtester
|
||||
from strategy32.config import PROFILE_V7_DEFAULT, build_strategy32_config
|
||||
from strategy32.research.adverse_regime import AdverseRegimeResearchHarness
|
||||
from strategy32.research.hybrid_regime import _curve_returns, _run_adverse_component_curve
|
||||
from strategy32.research.soft_router import (
|
||||
WINDOWS,
|
||||
YEAR_PERIODS,
|
||||
YTD_START,
|
||||
SoftRouterCandidate,
|
||||
build_regime_score_frame,
|
||||
compose_soft_router_curve,
|
||||
evaluate_candidate_exact,
|
||||
load_component_bundle,
|
||||
score_candidate,
|
||||
segment_metrics,
|
||||
)
|
||||
from strategy32.research.hybrid_regime import STATIC_FILTERS
|
||||
|
||||
|
||||
# Output artefacts for the soft-router grid search (JSON + Korean markdown).
OUT_JSON = Path("/tmp/strategy32_soft_router_search.json")
OUT_MD = Path("/Volumes/SSD/data/nextcloud/data/tara/files/📂HeadOffice/money-bot/strategy32/015_soft_router_탐색결과.md")

# Component choices combined by the search: regime profile, core universe
# filters, and the capitulation/chop/distribution engines.
PROFILES = ("loose_positive",)
CORE_FILTERS = ("overheat_tolerant", "prev_balanced")
CAP_ENGINES = ("cap_btc_rebound",)
CHOP_ENGINES = ("chop_inverse_carry", "chop_inverse_carry_strict")
DIST_ENGINES = ("dist_inverse_carry_strict",)
# Weight grids: core exposure floor, per-engine maximum weights, and the
# chop-blend floor — presumably swept combinatorially by the search loop;
# confirm against the code consuming them.
CORE_FLOORS = (0.00, 0.10, 0.20)
CAP_MAX_WEIGHTS = (0.20, 0.35, 0.50)
CHOP_MAX_WEIGHTS = (0.10, 0.20, 0.35)
DIST_MAX_WEIGHTS = (0.10, 0.20, 0.35)
CHOP_BLEND_FLOORS = (0.00, 0.10, 0.20)
||||
|
||||
|
||||
def _evaluate_from_curve(curve: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
    """Score an equity curve over the rolling windows and calendar years.

    Returns (window_metrics, year_metrics, score, negative_years,
    mdd_violations); the YTD segment is reported but excluded from scoring.
    """
    window_results: dict[str, dict[str, float]] = {}
    for days, label in WINDOWS:
        window_results[label] = segment_metrics(curve, latest_bar - pd.Timedelta(days=days), latest_bar)

    year_results: dict[str, dict[str, float]] = {}
    for label, start, end_exclusive in YEAR_PERIODS:
        # Clamp to the latest bar and make the exclusive end inclusive.
        segment_end = min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))
        year_results[label] = segment_metrics(curve, start, segment_end)
    year_results["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)

    scored_years = {name: metrics for name, metrics in year_results.items() if name != "2026_YTD"}
    score, negative_years, mdd_violations = score_candidate(window_results, scored_years)
    return window_results, year_results, score, negative_years, mdd_violations
|
||||
|
||||
|
||||
def _run_static_filter_curve(
    bundle,
    filter_name: str,
    raw_start: pd.Timestamp,
    data_end: pd.Timestamp,
    trade_start: pd.Timestamp,
) -> pd.Series:
    """Backtest one static filter over [raw_start, data_end] and return its
    equity curve restricted to trade_start onward."""
    # slice_bundle is imported at module top; the previous per-call re-import
    # inside the loop bodies was redundant and has been removed.
    sliced = slice_bundle(bundle, raw_start, data_end)
    cfg = build_strategy32_config(PROFILE_V7_DEFAULT, **STATIC_FILTERS[filter_name])
    backtester = Strategy32Backtester(cfg, sliced, trade_start=trade_start)
    backtester.engine_config.initial_capital = 1000.0
    return backtester.run().equity_curve.loc[lambda s: s.index >= trade_start]


def _exact_static_variant(bundle, latest_bar: pd.Timestamp, filter_name: str) -> dict[str, object]:
    """Exactly evaluate one static filter: independent backtests per period.

    Runs a fresh backtest for every rolling window, calendar year and the YTD
    segment (each with a 90-day warmup of extra data before its trade start),
    then scores the collected metrics. The YTD segment is reported but
    excluded from scoring.

    Args:
        bundle: full market bundle to slice per period.
        latest_bar: last completed bar; caps every period end.
        filter_name: key into STATIC_FILTERS selecting the config overrides.

    Returns:
        JSON-serialisable dict with per-window/per-year metrics and the score.
    """
    warmup = pd.Timedelta(days=90)

    window_results: dict[str, dict[str, float]] = {}
    for days, label in WINDOWS:
        eval_start = latest_bar - pd.Timedelta(days=days)
        curve = _run_static_filter_curve(bundle, filter_name, eval_start - warmup, latest_bar, eval_start)
        window_results[label] = segment_metrics(curve, eval_start, latest_bar)

    year_results: dict[str, dict[str, float]] = {}
    for label, start, end_exclusive in YEAR_PERIODS:
        # Clamp to the latest bar and make the exclusive end inclusive.
        eval_end = min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))
        curve = _run_static_filter_curve(bundle, filter_name, start - warmup, eval_end, start)
        year_results[label] = segment_metrics(curve, start, eval_end)

    curve = _run_static_filter_curve(bundle, filter_name, YTD_START - warmup, latest_bar, YTD_START)
    year_results["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)

    score, negative_years, mdd_violations = score_candidate(
        window_results,
        {k: v for k, v in year_results.items() if k != "2026_YTD"},
    )
    return {
        "name": filter_name,
        "windows": window_results,
        "years": year_results,
        "score": score,
        "negative_years": negative_years,
        "mdd_violations": mdd_violations,
        "validation": "exact_static_variant",
    }
|
||||
|
||||
|
||||
def main() -> None:
    """Run the soft-router grid search end to end.

    Phase 1 caches per-profile regime score frames plus core/adverse component
    return series; phase 2 scores the full cartesian candidate grid from those
    cached curves (approximate); phase 3 re-validates the top 3 candidates
    exactly, plus exact static-core baselines, then writes the JSON payload
    and a markdown report.
    """
    bundle, latest_bar = load_component_bundle()
    # 5-year evaluation span with 90 warm-up days in front of it.
    eval_start = latest_bar - pd.Timedelta(days=1825)
    raw_start = eval_start - pd.Timedelta(days=90)
    sliced = slice_bundle(bundle, raw_start, latest_bar)
    precomputed: dict[str, object] = {"profiles": {}}

    # Phase 1: cache everything the approximate scorer needs, so the grid can
    # be ranked without re-running a backtest per candidate.
    for profile_name in PROFILES:
        score_frame = build_regime_score_frame(sliced, eval_start, latest_bar, profile_name=profile_name)
        harness = AdverseRegimeResearchHarness(sliced, latest_bar)
        timestamps = sorted(sliced.prices["BTC"]["timestamp"].loc[sliced.prices["BTC"]["timestamp"] >= eval_start].tolist())
        core_returns: dict[str, pd.Series] = {}
        adverse_returns: dict[str, pd.Series] = {}

        for core_filter in CORE_FILTERS:
            cfg = build_strategy32_config(PROFILE_V7_DEFAULT, **STATIC_FILTERS[core_filter])
            backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
            backtester.engine_config.initial_capital = 1000.0
            core_curve = backtester.run().equity_curve.loc[lambda s: s.index >= eval_start]
            core_returns[core_filter] = _curve_returns(core_curve)
            print(f"[cache core] {profile_name}|{core_filter}", flush=True)

        # Engines can be shared across the cap/chop/dist roles — dedupe before
        # running each one once.
        for engine_name in sorted(set(CAP_ENGINES) | set(CHOP_ENGINES) | set(DIST_ENGINES)):
            adverse_curve = _run_adverse_component_curve(
                eval_start=eval_start,
                engine_name=engine_name,
                harness=harness,
                regime_frame=score_frame,
            )
            adverse_returns[engine_name] = _curve_returns(adverse_curve)
            print(f"[cache adverse] {profile_name}|{engine_name}", flush=True)

        precomputed["profiles"][profile_name] = {
            "score_frame": score_frame,
            "timestamps": timestamps,
            "core_returns": core_returns,
            "adverse_returns": adverse_returns,
        }

    # Phase 2: full cartesian product over every router knob.
    candidates = [
        SoftRouterCandidate(*combo)
        for combo in itertools.product(
            PROFILES,
            CORE_FILTERS,
            CAP_ENGINES,
            CHOP_ENGINES,
            DIST_ENGINES,
            CORE_FLOORS,
            CAP_MAX_WEIGHTS,
            CHOP_MAX_WEIGHTS,
            DIST_MAX_WEIGHTS,
            CHOP_BLEND_FLOORS,
        )
    ]

    approx_rows: list[dict[str, object]] = []
    for idx, candidate in enumerate(candidates, start=1):
        profile_cache = precomputed["profiles"][candidate.regime_profile]
        components = {
            "timestamps": profile_cache["timestamps"],
            "score_frame": profile_cache["score_frame"],
            "core_returns": profile_cache["core_returns"][candidate.core_filter],
            "cap_returns": profile_cache["adverse_returns"][candidate.cap_engine],
            "chop_returns": profile_cache["adverse_returns"][candidate.chop_engine],
            "dist_returns": profile_cache["adverse_returns"][candidate.dist_engine],
        }
        curve, weights = compose_soft_router_curve(candidate=candidate, **components)
        window_results, year_results, score, negative_years, mdd_violations = _evaluate_from_curve(curve, latest_bar)
        approx_rows.append(
            {
                "candidate": asdict(candidate),
                "name": candidate.name,
                "score": score,
                "negative_years": negative_years,
                "mdd_violations": mdd_violations,
                "windows": window_results,
                "years": year_results,
                "avg_weights": {
                    "core": float(weights["core_weight"].mean()),
                    "cap": float(weights["cap_weight"].mean()),
                    "chop": float(weights["chop_weight"].mean()),
                    "dist": float(weights["dist_weight"].mean()),
                    "cash": float(weights["cash_weight"].mean()),
                },
                "validation": "approx_full_curve_slice",
            }
        )
        # Progress heartbeat every 100 candidates and at the very end.
        if idx % 100 == 0 or idx == len(candidates):
            print(
                f"[approx {idx:04d}/{len(candidates)}] top={approx_rows[-1]['name']} "
                f"1y={window_results['1y']['total_return'] * 100:.2f}% "
                f"5y_ann={window_results['5y']['annualized_return'] * 100:.2f}%",
                flush=True,
            )

    # Rank: fewest negative years first, then fewest MDD violations, then
    # highest score.
    approx_rows.sort(key=lambda row: (int(row["negative_years"]), int(row["mdd_violations"]), -float(row["score"])))

    # Phase 3: exact re-validation of the top 3 approximate candidates.
    exact_top = []
    for row in approx_rows[:3]:
        candidate = SoftRouterCandidate(**row["candidate"])
        print(f"[exact-start] {candidate.name}", flush=True)
        result = evaluate_candidate_exact(bundle=bundle, latest_bar=latest_bar, candidate=candidate)
        exact_top.append(result)
        # NOTE(review): the sort and the progress print reference the
        # loop-local `candidate`/`result`, so they are kept inside the loop
        # here; re-sorting per iteration is redundant but harmless — confirm
        # the originally intended indentation.
        exact_top.sort(key=lambda item: (int(item["negative_years"]), int(item["mdd_violations"]), -float(item["score"])))
        print(
            f"[exact] {candidate.name} 1y={result['windows']['1y']['total_return'] * 100:.2f}% "
            f"5y_ann={result['windows']['5y']['annualized_return'] * 100:.2f}% "
            f"neg={result['negative_years']} mdd_viol={result['mdd_violations']}",
            flush=True,
        )

    # Exact static-core baselines for comparison against the router variants.
    static_exact = [_exact_static_variant(bundle, latest_bar, filter_name) for filter_name in CORE_FILTERS]

    payload = {
        "analysis": "strategy32_soft_router_search",
        "latest_completed_bar": str(latest_bar),
        "candidate_count": len(candidates),
        "component_cache_count": sum(
            len(profile_cache["core_returns"]) + len(profile_cache["adverse_returns"])
            for profile_cache in precomputed["profiles"].values()
        ),
        "summary": approx_rows[:20],
        "exact_top": exact_top,
        "exact_static": static_exact,
    }
    OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")

    # Markdown report; Korean section headings are kept verbatim.
    lines = [
        "# Strategy32 Soft Router 탐색결과",
        "",
        "## 1. 목적",
        "",
        "`5개 하드 레짐 -> 1엔진 선택` 구조를 버리고, `정적 코어 엔진 + adverse overlay` 구조를 연속형 점수 기반으로 탐색한다.",
        "",
        "## 2. 탐색 범위",
        "",
        f"- profiles: `{', '.join(PROFILES)}`",
        f"- core filters: `{', '.join(CORE_FILTERS)}`",
        f"- cap engines: `{', '.join(CAP_ENGINES)}`",
        f"- chop engines: `{', '.join(CHOP_ENGINES)}`",
        f"- dist engines: `{', '.join(DIST_ENGINES)}`",
        f"- total candidates: `{len(candidates)}`",
        "",
        "## 3. exact 상위 후보",
        "",
        "| rank | candidate | 1y | 2y ann | 3y ann | 4y ann | 5y ann | 5y MDD | 2025 | 2024 |",
        "|---|---|---:|---:|---:|---:|---:|---:|---:|---:|",
    ]
    for idx, row in enumerate(exact_top, start=1):
        lines.append(
            f"| `{idx}` | `{row['name']}` | `{row['windows']['1y']['total_return'] * 100:.2f}%` | "
            f"`{row['windows']['2y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['3y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['4y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['5y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['5y']['max_drawdown'] * 100:.2f}%` | "
            f"`{row['years']['2025']['total_return'] * 100:.2f}%` | "
            f"`{row['years']['2024']['total_return'] * 100:.2f}%` |"
        )

    lines.extend(
        [
            "",
            "## 4. 정적 코어 exact 비교",
            "",
            "| core filter | 1y | 2y ann | 3y ann | 4y ann | 5y ann | 5y MDD | 2025 | 2024 |",
            "|---|---:|---:|---:|---:|---:|---:|---:|---:|",
        ]
    )
    for row in static_exact:
        lines.append(
            f"| `{row['name']}` | `{row['windows']['1y']['total_return'] * 100:.2f}%` | "
            f"`{row['windows']['2y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['3y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['4y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['5y']['annualized_return'] * 100:.2f}%` | "
            f"`{row['windows']['5y']['max_drawdown'] * 100:.2f}%` | "
            f"`{row['years']['2025']['total_return'] * 100:.2f}%` | "
            f"`{row['years']['2024']['total_return'] * 100:.2f}%` |"
        )

    lines.extend(
        [
            "",
            "## 5. 해석",
            "",
            "- soft router가 정적 코어보다 좋아지려면, adverse overlay가 `2024/2025 방어`를 만들어내면서 `5y CAGR`을 크게 훼손하지 않아야 한다.",
            "- exact 결과가 정적 코어보다 약하면, 현재 adverse overlay 신호 품질 또는 overlay weight 공식이 아직 최적이 아니라는 뜻이다.",
            "",
            "## 6. 원본 결과",
            "",
            f"- JSON: [{OUT_JSON}]({OUT_JSON})",
        ]
    )
    OUT_MD.write_text("\n".join(lines) + "\n", encoding="utf-8")
|
||||
|
||||
|
||||
if __name__ == "__main__":  # run the search when invoked as a script
    main()
|
||||
144
scripts/run_v7_branch_validation.py
Normal file
144
scripts/run_v7_branch_validation.py
Normal file
@@ -0,0 +1,144 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
|
||||
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
|
||||
if str(PACKAGE_PARENT) not in sys.path:
|
||||
sys.path.insert(0, str(PACKAGE_PARENT))
|
||||
|
||||
from strategy29.backtest.window_analysis import evaluate_window_result, slice_bundle
|
||||
from strategy32.backtest.simulator import Strategy32Backtester
|
||||
from strategy32.config import PROFILE_V7_DEFAULT, build_strategy32_config
|
||||
from strategy32.data import build_strategy32_market_bundle
|
||||
|
||||
|
||||
# (calendar days, label) evaluation windows — one week up to five years.
WINDOWS = [(7, "1w"), (30, "1m"), (365, "1y"), (1095, "3y"), (1825, "5y")]
|
||||
|
||||
def balanced_score(results: dict[str, dict[str, float | int | str]]) -> float:
|
||||
score = 0.0
|
||||
for label, weight in (("1y", 1.0), ("3y", 1.0), ("5y", 1.2)):
|
||||
annualized = float(results[label]["annualized_return"])
|
||||
drawdown = abs(float(results[label]["max_drawdown"]))
|
||||
score += weight * (annualized / max(drawdown, 0.01))
|
||||
score += 0.15 * float(results["1m"]["total_return"])
|
||||
return score
|
||||
|
||||
|
||||
def main() -> None:
    """Validate the v7 feature-flag branches.

    Fetches one market bundle, backtests each variant (base profile plus flag
    overrides) over every window in WINDOWS, ranks the variants by
    ``balanced_score``, prints a summary, and writes the full payload to
    /tmp/strategy32_v7_branch_validation.json.
    """
    base = build_strategy32_config(PROFILE_V7_DEFAULT)
    end = pd.Timestamp("2026-03-15 00:00:00", tz="UTC")
    # Enough history for the longest window plus warmup plus a safety margin.
    start = end - pd.Timedelta(days=max(days for days, _ in WINDOWS) + base.warmup_days + 14)

    # (variant name, config attribute overrides) — empty dict is the baseline.
    variants: list[tuple[str, dict[str, bool]]] = [
        ("v7_default", {}),
        ("v7_plus_expanded_hedge", {"enable_expanded_hedge": True}),
        ("v7_plus_max_holding_exit", {"enable_max_holding_exit": True}),
        ("v7_plus_expanded_hedge_plus_max_holding_exit", {"enable_expanded_hedge": True, "enable_max_holding_exit": True}),
    ]

    print("fetching bundle...")
    bundle, latest_completed_bar, accepted_symbols, rejected_symbols, quote_by_symbol = build_strategy32_market_bundle(
        symbols=base.symbols,
        auto_discover_symbols=base.auto_discover_symbols,
        quote_assets=base.quote_assets,
        excluded_base_assets=base.excluded_base_assets,
        min_quote_volume_24h=base.discovery_min_quote_volume_24h,
        start=start,
        end=end,
        timeframe=base.timeframe,
        max_staleness_days=base.max_symbol_staleness_days,
    )
    print("latest", latest_completed_bar)

    results: dict[str, dict[str, dict[str, float | int | str]]] = {}
    summary_rows: list[dict[str, float | int | str]] = []
    for name, overrides in variants:
        # Deep-copy so flag overrides never leak between variants.
        cfg = copy.deepcopy(base)
        for attr, value in overrides.items():
            setattr(cfg, attr, value)
        variant_results = {}
        print(f"\nVARIANT {name}")
        for days, label in WINDOWS:
            eval_end = latest_completed_bar
            eval_start = eval_end - pd.Timedelta(days=days)
            raw_start = eval_start - pd.Timedelta(days=cfg.warmup_days)
            sliced = slice_bundle(bundle, raw_start, eval_end)
            backtester = Strategy32Backtester(cfg, sliced, trade_start=eval_start)
            backtester.engine_config.initial_capital = 1000.0
            result = backtester.run()
            metrics = evaluate_window_result(result, eval_start=eval_start, bars_per_day=backtester.engine_config.bars_per_day)
            # Attach per-engine PnL and trade counts alongside the window metrics.
            metrics["engine_pnl"] = result.engine_pnl
            metrics["total_trades"] = result.total_trades
            variant_results[label] = metrics
            print(
                label,
                "ret",
                round(float(metrics["total_return"]) * 100, 2),
                "mdd",
                round(float(metrics["max_drawdown"]) * 100, 2),
                "sharpe",
                round(float(metrics["sharpe"]), 2),
                "trades",
                metrics["trade_count"],
            )
        score = balanced_score(variant_results)
        results[name] = variant_results
        summary_rows.append(
            {
                "name": name,
                "balanced_score": score,
                "ret_1w": float(variant_results["1w"]["total_return"]),
                "ret_1m": float(variant_results["1m"]["total_return"]),
                "ret_1y": float(variant_results["1y"]["total_return"]),
                "ret_3y": float(variant_results["3y"]["total_return"]),
                "ret_5y": float(variant_results["5y"]["total_return"]),
                "mdd_1y": float(variant_results["1y"]["max_drawdown"]),
                "mdd_3y": float(variant_results["3y"]["max_drawdown"]),
                "mdd_5y": float(variant_results["5y"]["max_drawdown"]),
            }
        )

    # Best balanced score first.
    summary_rows.sort(key=lambda row: float(row["balanced_score"]), reverse=True)
    payload = {
        "strategy": "strategy32",
        "analysis": "v7_branch_validation",
        "profile": PROFILE_V7_DEFAULT,
        "initial_capital": 1000.0,
        "auto_discover_symbols": base.auto_discover_symbols,
        "latest_completed_bar": str(latest_completed_bar),
        "requested_symbols": [] if base.auto_discover_symbols else base.symbols,
        "accepted_symbols": accepted_symbols,
        "rejected_symbols": rejected_symbols,
        "quote_by_symbol": quote_by_symbol,
        "timeframe": base.timeframe,
        "results": results,
        "summary": summary_rows,
    }
    out = Path("/tmp/strategy32_v7_branch_validation.json")
    out.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    print("\nRanked variants")
    for row in summary_rows:
        print(
            row["name"],
            "score",
            round(float(row["balanced_score"]), 3),
            "1y",
            round(float(row["ret_1y"]) * 100, 2),
            "3y",
            round(float(row["ret_3y"]) * 100, 2),
            "5y",
            round(float(row["ret_5y"]) * 100, 2),
            "mdd5y",
            round(float(row["mdd_5y"]) * 100, 2),
        )
    print("\nwrote", out)
|
||||
|
||||
|
||||
if __name__ == "__main__":  # run the validation when invoked as a script
    main()
|
||||
1
signal/__init__.py
Normal file
1
signal/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
from __future__ import annotations
|
||||
22
signal/router.py
Normal file
22
signal/router.py
Normal file
@@ -0,0 +1,22 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from strategy29.common.models import AllocationDecision, Regime
|
||||
from strategy29.signal.allocation_router import AllocationRouter
|
||||
from strategy32.config import Strategy32Budgets
|
||||
|
||||
|
||||
class Strategy32Router(AllocationRouter):
    """Allocation router that maps a regime to strategy32 budget splits."""

    def __init__(self, budgets: Strategy32Budgets):
        # Keep the budget table before delegating to the base router setup.
        self.budgets = budgets
        super().__init__()

    def decide(self, regime: Regime) -> AllocationDecision:
        """Return the allocation for *regime*; cash absorbs any remainder."""
        momentum_pct, carry_pct, sideways_pct = self.budgets.for_regime(regime)
        leftover = 1.0 - momentum_pct - carry_pct - sideways_pct
        return AllocationDecision(
            regime=regime,
            momentum_budget_pct=momentum_pct,
            carry_budget_pct=carry_pct,
            spread_budget_pct=sideways_pct,
            cash_budget_pct=max(0.0, leftover),
        )
|
||||
794
tests/test_strategy32.py
Normal file
794
tests/test_strategy32.py
Normal file
@@ -0,0 +1,794 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import tempfile
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
from urllib.error import HTTPError
|
||||
from unittest.mock import patch
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from strategy29.data import binance_history
|
||||
from strategy29.common.models import MarketDataBundle, Regime
|
||||
from strategy32.backtest.simulator import Strategy32Backtester, Strategy32MomentumCarryBacktester
|
||||
from strategy32.config import PROFILE_V5_BASELINE, PROFILE_V7_DEFAULT, Strategy32Budgets, Strategy32Config, build_strategy32_config
|
||||
from strategy32.live.executor import LiveExecutionConfig, LiveFuturesExecutor
|
||||
from strategy32.live.runtime import (
|
||||
BEST_CASH_OVERLAY,
|
||||
LiveMonitorConfig,
|
||||
_capital_summary,
|
||||
_apply_weekly_macro_filter,
|
||||
_combine_targets,
|
||||
_completed_bar_time,
|
||||
_execution_refinement_states,
|
||||
_refine_execution_targets,
|
||||
_expand_core_targets,
|
||||
_heartbeat_slot,
|
||||
_overlay_signal_strengths,
|
||||
_select_live_hard_filter_symbols,
|
||||
_weekly_macro_filter_state,
|
||||
)
|
||||
from strategy32.research.soft_router import (
|
||||
CashOverlayCandidate,
|
||||
MacroScaleSpec,
|
||||
SoftRouterCandidate,
|
||||
compose_cash_overlay_curve,
|
||||
compose_soft_router_curve,
|
||||
)
|
||||
from strategy32.signal.router import Strategy32Router
|
||||
from strategy32.universe import filter_momentum_frame, limit_correlated_symbols, rank_momentum_universe, score_momentum_universe, select_dynamic_universe, select_strategic_universe
|
||||
|
||||
|
||||
def make_bundle(bars: int = 260) -> MarketDataBundle:
    """Build a small synthetic 4h-bar market bundle (BTC/ETH/SOL) for tests.

    Each symbol's price series trends linearly upward; ETH and SOL also carry
    funding/basis frames with slowly decaying basis.
    """
    timestamps = pd.date_range("2025-01-01", periods=bars, freq="4h", tz="UTC")

    def price_frame(open_base: float, high_base: float, low_base: float, step: float, volume: float) -> pd.DataFrame:
        # open == close by construction; high/low run parallel with fixed offsets.
        closes = [open_base + i * step for i in range(bars)]
        return pd.DataFrame(
            {
                "timestamp": timestamps,
                "open": closes,
                "high": [high_base + i * step for i in range(bars)],
                "low": [low_base + i * step for i in range(bars)],
                "close": closes,
                "volume": [volume] * bars,
            }
        )

    def funding_frame(rate: float, basis_base: float) -> pd.DataFrame:
        # Constant funding rate; basis decays linearly by 1bp/100 per bar.
        return pd.DataFrame(
            {
                "timestamp": timestamps,
                "funding_rate": [rate] * bars,
                "basis": [basis_base - i * 0.00001 for i in range(bars)],
            }
        )

    prices = {
        "BTC": price_frame(100.0, 100.3, 99.8, 0.20, 2_000_000.0),
        "ETH": price_frame(50.0, 50.3, 49.8, 0.25, 2_500_000.0),
        "SOL": price_frame(20.0, 20.2, 19.9, 0.18, 1_800_000.0),
    }
    funding = {
        "ETH": funding_frame(0.00015, 0.0100),
        "SOL": funding_frame(0.00012, 0.0090),
    }
    return MarketDataBundle(prices=prices, funding=funding)
|
||||
|
||||
|
||||
def make_execution_prices(bundle: MarketDataBundle, *, blocked_symbols: set[str] | None = None) -> dict[str, pd.DataFrame]:
    """Derive hourly execution-price frames spanning the bundle's BTC range.

    Symbols listed in *blocked_symbols* get a steadily falling close series;
    every other symbol trends upward. High/low bracket the close by 0.2.
    """
    blocked = blocked_symbols if blocked_symbols is not None else set()
    btc_ts = bundle.prices["BTC"]["timestamp"]
    timestamps = pd.date_range(pd.Timestamp(btc_ts.iloc[0]), pd.Timestamp(btc_ts.iloc[-1]), freq="1h", tz="UTC")
    n = len(timestamps)
    frames: dict[str, pd.DataFrame] = {}
    for symbol in bundle.prices:
        base = 100.0 if symbol == "BTC" else 50.0
        if symbol in blocked:
            closes = [base - i * 0.15 for i in range(n)]
        else:
            closes = [base + i * 0.08 for i in range(n)]
        frames[symbol] = pd.DataFrame(
            {
                "timestamp": timestamps,
                "open": closes,
                "high": [price + 0.2 for price in closes],
                "low": [price - 0.2 for price in closes],
                "close": closes,
                "volume": [1_000_000.0] * n,
            }
        )
    return frames
|
||||
|
||||
|
||||
class Strategy32Tests(unittest.TestCase):
|
||||
    def test_router_disables_spread(self) -> None:
        # In STRONG_UP the router must zero spread and keep momentum funded.
        decision = Strategy32Router(Strategy32Config().budgets).decide(Regime.STRONG_UP)
        self.assertEqual(decision.spread_budget_pct, 0.0)
        self.assertGreater(decision.momentum_budget_pct, 0.0)
|
||||
|
||||
    def test_dynamic_universe_picks_highest_volume_symbols(self) -> None:
        # With SOL's volume crushed below the dollar-volume floor, the
        # single-slot universe should select ETH.
        bundle = make_bundle()
        bundle.prices["SOL"]["volume"] = 100_000.0
        selected = select_dynamic_universe(
            bundle.prices,
            timestamp=bundle.prices["BTC"]["timestamp"].iloc[-1],
            min_history_bars=120,
            lookback_bars=30,
            max_symbols=1,
            min_avg_dollar_volume=1_000_000.0,
        )
        self.assertEqual(selected, ["ETH"])
|
||||
|
||||
    def test_dynamic_universe_supports_unlimited_selection(self) -> None:
        # max_symbols=0 means "no cap": every qualifying symbol is kept
        # (BTC is not part of the selectable universe here).
        bundle = make_bundle()
        selected = select_dynamic_universe(
            bundle.prices,
            timestamp=bundle.prices["BTC"]["timestamp"].iloc[-1],
            min_history_bars=120,
            lookback_bars=30,
            max_symbols=0,
            min_avg_dollar_volume=1_000_000.0,
        )
        self.assertEqual(selected, ["ETH", "SOL"])
|
||||
|
||||
    def test_live_hard_filter_uses_daily_cut_before_ranking(self) -> None:
        # SOL fails the min dollar-volume cut, so the live hard filter keeps
        # only BTC and ETH.
        bundle = make_bundle()
        bundle.prices["SOL"]["volume"] = 10_000.0
        selected = _select_live_hard_filter_symbols(
            bundle.prices,
            timestamp=bundle.prices["BTC"]["timestamp"].iloc[-1],
            config=LiveMonitorConfig(
                hard_filter_min_history_bars=120,
                hard_filter_lookback_bars=30,
                hard_filter_min_avg_dollar_volume=1_000_000.0,
            ),
        )
        self.assertEqual(selected, ["BTC", "ETH"])
|
||||
|
||||
    def test_backtester_daily_hard_filter_cache_sticks_within_day(self) -> None:
        # With a 1d refresh cadence, the filter result computed early in the
        # day (when SOL's volume spikes at bar 2) must be reused later that
        # same day even after SOL's volume collapses again at bar 5.
        bundle = make_bundle(bars=12)
        bundle.prices["ETH"]["volume"] = 60_000.0
        bundle.prices["SOL"]["volume"] = 1.0
        bundle.prices["SOL"].loc[2, "volume"] = 120_000.0
        bundle.prices["SOL"].loc[5, "volume"] = 1.0
        config = Strategy32Config(
            hard_filter_refresh_cadence="1d",
            hard_filter_min_history_bars=1,
            hard_filter_lookback_bars=1,
            hard_filter_min_avg_dollar_volume=1_000_000.0,
        )
        backtester = Strategy32MomentumCarryBacktester(config, bundle)
        first_ts = bundle.prices["BTC"]["timestamp"].iloc[2]
        later_same_day_ts = bundle.prices["BTC"]["timestamp"].iloc[5]
        initial = backtester._hard_filter_symbols(first_ts, min_history_bars=1)
        same_day = backtester._hard_filter_symbols(later_same_day_ts, min_history_bars=1)
        self.assertIn("SOL", initial)
        self.assertIn("SOL", same_day)
|
||||
|
||||
    def test_backtester_intraday_hard_filter_changes_without_daily_cache(self) -> None:
        # Same fixture as the daily-cache test, but with a 4h refresh cadence
        # the later evaluation must drop SOL once its volume collapses.
        bundle = make_bundle(bars=12)
        bundle.prices["ETH"]["volume"] = 60_000.0
        bundle.prices["SOL"]["volume"] = 1.0
        bundle.prices["SOL"].loc[2, "volume"] = 120_000.0
        bundle.prices["SOL"].loc[5, "volume"] = 1.0
        config = Strategy32Config(
            hard_filter_refresh_cadence="4h",
            hard_filter_min_history_bars=1,
            hard_filter_lookback_bars=1,
            hard_filter_min_avg_dollar_volume=1_000_000.0,
        )
        backtester = Strategy32MomentumCarryBacktester(config, bundle)
        first_ts = bundle.prices["BTC"]["timestamp"].iloc[2]
        later_same_day_ts = bundle.prices["BTC"]["timestamp"].iloc[5]
        initial = backtester._hard_filter_symbols(first_ts, min_history_bars=1)
        same_day = backtester._hard_filter_symbols(later_same_day_ts, min_history_bars=1)
        self.assertIn("SOL", initial)
        self.assertNotIn("SOL", same_day)
|
||||
|
||||
    def test_weekly_macro_filter_flags_downtrend_as_risk_off(self) -> None:
        # A steadily falling daily BTC series should yield risk_on == False.
        timestamps = pd.date_range("2024-01-01", periods=400, freq="1D", tz="UTC")
        prices = {
            "BTC": pd.DataFrame(
                {
                    "timestamp": timestamps,
                    "open": [300.0 - i * 0.4 for i in range(len(timestamps))],
                    "high": [301.0 - i * 0.4 for i in range(len(timestamps))],
                    "low": [299.0 - i * 0.4 for i in range(len(timestamps))],
                    "close": [300.0 - i * 0.4 for i in range(len(timestamps))],
                    "volume": [1_000_000.0] * len(timestamps),
                }
            )
        }
        macro = _weekly_macro_filter_state(
            prices,
            timestamp=timestamps[-1],
            config=LiveMonitorConfig(macro_filter_fast_weeks=10, macro_filter_slow_weeks=30),
        )
        self.assertFalse(macro["risk_on"])
|
||||
|
||||
    def test_weekly_macro_filter_removes_tradeable_core_targets_when_risk_off(self) -> None:
        # Risk-off drops tradeable core targets but keeps non-tradeable entries.
        filtered = _apply_weekly_macro_filter(
            [
                {"instrument": "perp:ETH", "tradeable": True, "source": "core", "weight": 0.3},
                {"instrument": "carry:SOL", "tradeable": False, "source": "core", "weight": 0.1},
            ],
            macro_state={"risk_on": False},
        )
        self.assertEqual(filtered, [{"instrument": "carry:SOL", "tradeable": False, "source": "core", "weight": 0.1}])
|
||||
|
||||
    def test_execution_refinement_blocks_extended_entry(self) -> None:
        # The final bar jumps to 140 against a ~0.4/bar trend, far beyond the
        # configured chase/return caps, so the refinement should block entry.
        bars = 64
        timestamps = pd.date_range("2025-01-01", periods=bars, freq="1h", tz="UTC")
        prices = {
            "ETH": pd.DataFrame(
                {
                    "timestamp": timestamps,
                    "open": [100.0 + i * 0.4 for i in range(bars)],
                    "high": [100.3 + i * 0.4 for i in range(bars)],
                    "low": [99.7 + i * 0.4 for i in range(bars)],
                    "close": [100.0 + i * 0.4 for i in range(bars - 1)] + [140.0],
                    "volume": [1_000_000.0] * bars,
                }
            )
        }
        states = _execution_refinement_states(
            prices,
            timestamp=timestamps[-1],
            config=LiveMonitorConfig(
                execution_refinement_lookback_bars=48,
                execution_refinement_fast_ema=8,
                execution_refinement_slow_ema=21,
                execution_refinement_scale_down_gap=0.008,
                execution_refinement_max_chase_gap=0.018,
                execution_refinement_max_recent_return=0.03,
                execution_refinement_scale_down_factor=0.5,
            ),
        )
        self.assertEqual(states["ETH"]["action"], "block")
|
||||
|
||||
    def test_refined_execution_targets_scale_positive_perp_only(self) -> None:
        # Scale-down halves the long ETH weight (desired weight preserved for
        # reporting); the short BTC leg is left untouched.
        refined = _refine_execution_targets(
            [
                {"instrument": "perp:ETH", "tradeable": True, "weight": 0.4},
                {"instrument": "perp:BTC", "tradeable": True, "weight": -0.2},
            ],
            refinement_states={"ETH": {"action": "scale_down", "scale": 0.5, "reason": "slightly_extended"}},
        )
        self.assertEqual(refined[0]["weight"], 0.2)
        self.assertEqual(refined[0]["desired_weight"], 0.4)
        self.assertEqual(refined[1]["weight"], -0.2)
|
||||
|
||||
    def test_executor_entry_only_refinement_does_not_force_close_existing_position(self) -> None:
        # A blocked entry (weight zeroed by refinement) must not force-close an
        # already-open ETH position when entry_only_refinement is enabled:
        # the executor should place no orders at all.

        class FakeClient:
            """Minimal stand-in for the Binance USD-M client used by the executor."""

            def __init__(self) -> None:
                # Records every market order the executor attempts.
                self.orders: list[dict[str, object]] = []

            def get_balance(self):
                return [{"asset": "USDT", "balance": "1000"}]

            def get_position_risk(self):
                # One open long ETH position worth 100 USDT.
                return [{"symbol": "ETHUSDT", "positionAmt": "1", "markPrice": "100", "entryPrice": "100", "notional": "100", "unRealizedProfit": "0"}]

            def get_ticker_price(self, symbol):
                return {"symbol": symbol, "price": "100"}

            def get_exchange_info(self):
                return {
                    "symbols": [
                        {
                            "symbol": "ETHUSDT",
                            "baseAsset": "ETH",
                            "quoteAsset": "USDT",
                            "filters": [
                                {"filterType": "LOT_SIZE", "stepSize": "0.001", "minQty": "0.001"},
                                {"filterType": "MIN_NOTIONAL", "notional": "5"},
                            ],
                        }
                    ]
                }

            def set_leverage(self, symbol, leverage):
                return {"symbol": symbol, "leverage": leverage}

            def place_market_order(self, **kwargs):
                self.orders.append(kwargs)
                return {"status": "FILLED", **kwargs}

        executor = LiveFuturesExecutor(
            FakeClient(),
            LiveExecutionConfig(
                enabled=True,
                leverage=2,
                min_target_notional_usd=25.0,
                min_rebalance_notional_usd=10.0,
                close_orphan_positions=True,
                entry_only_refinement=True,
            ),
        )
        result = executor.reconcile(
            {
                "generated_at": "2026-03-16T00:00:00Z",
                "universe": {"quote_by_symbol": {"ETH": "USDT"}},
                "execution_targets": [
                    {
                        "instrument": "perp:ETH",
                        "tradeable": True,
                        "weight": 0.0,
                        "desired_weight": 0.4,
                        "refinement_action": "block",
                        "refinement_reason": "too_extended",
                    }
                ],
            }
        )
        self.assertEqual(result.orders, [])
|
||||
|
||||
    def test_capital_summary_extracts_usdt_and_usdc(self) -> None:
        # Only quote assets (USDT/USDC) are summed; the BTC balance is ignored.
        summary = _capital_summary(
            {
                "balances": [
                    {"asset": "USDT", "balance": "1200.5"},
                    {"asset": "USDC", "balance": "300.25"},
                    {"asset": "BTC", "balance": "0.1"},
                ]
            }
        )
        self.assertEqual(summary, {"usdt": 1200.5, "usdc": 300.25, "total_quote": 1500.75})
|
||||
|
||||
    def test_momentum_universe_prefers_stronger_symbol(self) -> None:
        # SOL's steeper relative trend should rank it ahead of ETH.
        bundle = make_bundle()
        ranked = rank_momentum_universe(
            bundle.prices,
            bundle.funding,
            btc_symbol="BTC",
            timestamp=bundle.prices["BTC"]["timestamp"].iloc[-1],
            candidate_symbols=["ETH", "SOL"],
            min_history_bars=120,
            liquidity_lookback_bars=30,
            short_lookback_bars=18,
            long_lookback_bars=72,
            overheat_funding_rate=0.00025,
            max_symbols=2,
        )
        self.assertEqual(ranked[0], "SOL")
|
||||
|
||||
    def test_momentum_quality_filter_drops_overheated_symbol(self) -> None:
        # PEPE's funding rate (0.0008) exceeds max_latest_funding_rate, so the
        # quality filter must drop it while keeping ETH.
        bundle = make_bundle()
        bundle.prices["PEPE"] = bundle.prices["ETH"].copy()
        bundle.prices["PEPE"]["close"] = [10.0 + i * 0.80 for i in range(len(bundle.prices["PEPE"]))]
        bundle.prices["PEPE"]["volume"] = [3_000_000.0] * len(bundle.prices["PEPE"])
        bundle.funding["PEPE"] = pd.DataFrame(
            {
                "timestamp": bundle.prices["PEPE"]["timestamp"],
                "funding_rate": [0.0008] * len(bundle.prices["PEPE"]),
                "basis": [0.012] * len(bundle.prices["PEPE"]),
            }
        )
        frame = score_momentum_universe(
            bundle.prices,
            bundle.funding,
            btc_symbol="BTC",
            timestamp=bundle.prices["BTC"]["timestamp"].iloc[-1],
            candidate_symbols=["ETH", "PEPE"],
            min_history_bars=120,
            liquidity_lookback_bars=30,
            short_lookback_bars=18,
            long_lookback_bars=72,
            overheat_funding_rate=0.00025,
        )
        filtered = filter_momentum_frame(
            frame,
            min_score=0.0,
            min_relative_strength=-1.0,
            min_7d_return=-1.0,
            max_7d_return=0.35,
            min_positive_bar_ratio=0.0,
            max_short_volatility=1.0,
            max_latest_funding_rate=0.00045,
            max_beta=10.0,
        )
        self.assertIn("ETH", filtered["symbol"].tolist())
        self.assertNotIn("PEPE", filtered["symbol"].tolist())
|
||||
|
||||
    def test_strategic_universe_keeps_symbols_with_positive_edge(self) -> None:
        """With permissive thresholds, both fixture alts should enter the strategic universe."""
        bundle = make_bundle()
        # All momentum/carry thresholds are set loose (e.g. -1.0 minimums) so the
        # test exercises wiring rather than the fixture's exact scores.
        selected = select_strategic_universe(
            bundle.prices,
            bundle.funding,
            btc_symbol="BTC",
            timestamp=bundle.prices["BTC"]["timestamp"].iloc[-1],
            min_history_bars=120,
            lookback_bars=30,
            min_avg_dollar_volume=1_000_000.0,
            short_lookback_bars=18,
            long_lookback_bars=72,
            overheat_funding_rate=0.00025,
            carry_lookback_bars=21,
            carry_expected_horizon_bars=18,
            carry_roundtrip_cost_pct=0.0020,
            carry_basis_risk_multiplier=1.0,
            momentum_min_score=0.0,
            momentum_min_relative_strength=-1.0,
            momentum_min_7d_return=-1.0,
            momentum_max_7d_return=1.0,
            momentum_min_positive_bar_ratio=0.0,
            momentum_max_short_volatility=1.0,
            momentum_max_latest_funding_rate=1.0,
            momentum_max_beta=10.0,
            carry_min_expected_edge=-1.0,
            max_symbols=0,
        )
        self.assertIn("ETH", selected)
        self.assertIn("SOL", selected)
|
||||
|
||||
    def test_correlation_limit_drops_duplicate_path(self) -> None:
        """A symbol whose return path duplicates an earlier pick should be skipped."""
        bundle = make_bundle()
        # LINK shares ETH's bar grid; SOL is rewritten with an alternating
        # component so its returns decorrelate from the trending pair.
        bundle.prices["LINK"] = bundle.prices["ETH"].copy()
        bundle.prices["LINK"]["close"] = [30.0 + i * 0.10 for i in range(len(bundle.prices["LINK"]))]
        bundle.prices["SOL"]["close"] = [20.0 + i * 0.18 + (0.9 if i % 2 == 0 else -0.7) for i in range(len(bundle.prices["SOL"]))]
        limited = limit_correlated_symbols(
            bundle.prices,
            timestamp=bundle.prices["BTC"]["timestamp"].iloc[-1],
            candidate_symbols=["ETH", "LINK", "SOL"],
            lookback_bars=36,
            max_pairwise_correlation=0.80,
            max_symbols=2,
        )
        # LINK correlates above 0.80 with ETH and is dropped; SOL fills slot two.
        self.assertEqual(limited, ["ETH", "SOL"])
|
||||
|
||||
    def test_backtester_runs(self) -> None:
        """Smoke test: a permissive config on the fixture bundle should produce trades."""
        result = Strategy32Backtester(
            Strategy32Config(
                symbols=["BTC", "ETH", "SOL"],
                # Loosened quality caps so the fixture reliably generates entries.
                momentum_min_history_bars=120,
                momentum_max_7d_return=1.0,
                momentum_min_positive_bar_ratio=0.0,
                momentum_max_short_volatility=1.0,
                momentum_max_beta=10.0,
                momentum_max_latest_funding_rate=1.0,
            ),
            make_bundle(),
        ).run()
        self.assertGreater(result.total_trades, 0)
        self.assertIn("momentum", result.engine_pnl)
|
||||
|
||||
    def test_backtester_execution_refinement_blocks_entry_and_logs_rejection(self) -> None:
        """Blocked execution prices must veto momentum entries and be counted in the summary."""
        bundle = make_bundle()
        # Restrict the universe to BTC + ETH so every momentum entry attempt
        # targets ETH, whose execution feed is marked as blocked below.
        bundle.prices = {"BTC": bundle.prices["BTC"], "ETH": bundle.prices["ETH"]}
        bundle.funding = {"ETH": bundle.funding["ETH"]}
        config = Strategy32Config(
            symbols=["BTC", "ETH"],
            # Zero out every carry/sideways budget so only the momentum engine trades.
            budgets=Strategy32Budgets(
                strong_up_carry=0.0,
                up_carry=0.0,
                sideways_carry=0.0,
                down_carry=0.0,
                strong_up_sideways=0.0,
                up_sideways=0.0,
                sideways_sideways=0.0,
                down_sideways=0.0,
            ),
        )
        result = Strategy32Backtester(
            config,
            bundle,
            execution_prices=make_execution_prices(bundle, blocked_symbols={"ETH"}),
        ).run(close_final_positions=False)
        summary = result.metadata.get("rejection_summary", {})
        self.assertGreater(summary.get("execution_refinement_blocked", 0), 0)
        # With ETH blocked, no momentum position may survive to the end.
        final_positions = result.metadata.get("final_positions", [])
        self.assertTrue(all(position["engine"] != "momentum" for position in final_positions))
|
||||
|
||||
    def test_backtester_rejection_logging_records_empty_universe(self) -> None:
        """An impossible liquidity floor should surface as 'tradeable_universe_empty' rejections."""
        bundle = make_bundle()
        config = Strategy32Config(
            symbols=["BTC", "ETH", "SOL"],
            # $1T average dollar volume guarantees no symbol ever qualifies.
            universe_min_avg_dollar_volume=1_000_000_000_000.0,
            budgets=Strategy32Budgets(
                strong_up_carry=0.0,
                up_carry=0.0,
                sideways_carry=0.0,
                down_carry=0.0,
                strong_up_sideways=0.0,
                up_sideways=0.0,
                sideways_sideways=0.0,
                down_sideways=0.0,
            ),
        )
        result = Strategy32Backtester(config, bundle).run()
        summary = result.metadata.get("rejection_summary", {})
        self.assertGreater(summary.get("tradeable_universe_empty", 0), 0)
|
||||
|
||||
    def test_backtester_liquidity_and_momentum_fallback_can_restore_candidates(self) -> None:
        """Fallback thresholds should revive candidates when the primary filters reject everything."""
        bundle = make_bundle()
        config = Strategy32Config(
            symbols=["BTC", "ETH", "SOL"],
            # Primary thresholds are deliberately unreachable (score/edge >= 10.0)...
            momentum_min_score=10.0,
            carry_min_expected_edge=10.0,
            # ...while the fallback tiers are permissive enough to re-admit symbols.
            universe_fallback_min_avg_dollar_volume=1_000_000.0,
            universe_fallback_top_n=2,
            momentum_fallback_min_score=0.0,
            momentum_fallback_min_relative_strength=-1.0,
            momentum_fallback_min_7d_return=-1.0,
            budgets=Strategy32Budgets(
                strong_up_carry=0.0,
                up_carry=0.0,
                sideways_carry=0.0,
                down_carry=0.0,
                strong_up_sideways=0.0,
                up_sideways=0.0,
                sideways_sideways=0.0,
                down_sideways=0.0,
            ),
        )
        result = Strategy32Backtester(
            config,
            bundle,
            execution_prices=make_execution_prices(bundle),
        ).run(close_final_positions=False)
        summary = result.metadata.get("rejection_summary", {})
        # Both fallback paths must have fired at least once...
        self.assertGreater(summary.get("dynamic_universe_fallback_used", 0), 0)
        self.assertGreater(summary.get("momentum_filter_fallback_used", 0), 0)
        # ...and the revived candidates must actually reach a momentum position.
        final_positions = result.metadata.get("final_positions", [])
        self.assertTrue(any(position["engine"] == "momentum" for position in final_positions))
|
||||
|
||||
    def test_trade_start_blocks_warmup_trades(self) -> None:
        """No trade may be entered before the configured trade_start boundary."""
        bundle = make_bundle()
        # Place the boundary 40 bars before the end so both sides are exercised.
        trade_start = bundle.prices["BTC"]["timestamp"].iloc[-40]
        result = Strategy32Backtester(
            Strategy32Config(symbols=["BTC", "ETH", "SOL"]),
            bundle,
            trade_start=trade_start,
        ).run()
        self.assertTrue(all(trade.entry_time >= trade_start for trade in result.trades))
|
||||
|
||||
    def test_profile_helpers_select_expected_feature_flags(self) -> None:
        """Each named profile must map to its expected set of feature flags."""
        # v7 default: kill-switch + daily trend filter on, sideways engine off.
        default_cfg = build_strategy32_config(PROFILE_V7_DEFAULT)
        self.assertFalse(default_cfg.enable_sideways_engine)
        self.assertTrue(default_cfg.enable_strong_kill_switch)
        self.assertTrue(default_cfg.enable_daily_trend_filter)
        self.assertFalse(default_cfg.enable_expanded_hedge)
        self.assertFalse(default_cfg.enable_max_holding_exit)

        # v5 baseline: sideways engine on, the newer protections off.
        baseline_cfg = build_strategy32_config(PROFILE_V5_BASELINE)
        self.assertTrue(baseline_cfg.enable_sideways_engine)
        self.assertFalse(baseline_cfg.enable_strong_kill_switch)
        self.assertFalse(baseline_cfg.enable_daily_trend_filter)
        self.assertFalse(baseline_cfg.enable_expanded_hedge)
        self.assertFalse(baseline_cfg.enable_max_holding_exit)

        # A bare Strategy32Config must default to the v7 flag set.
        self.assertFalse(Strategy32Config().enable_sideways_engine)
        self.assertTrue(Strategy32Config().enable_strong_kill_switch)
        self.assertTrue(Strategy32Config().enable_daily_trend_filter)
|
||||
|
||||
    def test_binance_history_fetch_uses_stale_cache_on_http_error(self) -> None:
        """When the HTTP fetch fails, _fetch_json must serve the previously cached payload."""
        url = "https://example.com/test.json"

        # Minimal stand-in for urlopen's response: context manager + read().
        class FakeResponse:
            def __enter__(self):
                return self

            def __exit__(self, exc_type, exc, tb):
                return False

            def read(self) -> bytes:
                return json.dumps({"ok": True}).encode("utf-8")

        with tempfile.TemporaryDirectory() as tmpdir:
            with (
                patch.object(binance_history, "DEFAULT_CACHE_DIR", Path(tmpdir)),
                patch.object(binance_history, "DEFAULT_HTTP_RETRIES", 1),
                # First call succeeds (and populates the cache); second raises 418.
                patch.object(binance_history, "urlopen", side_effect=[FakeResponse(), HTTPError(url, 418, "blocked", hdrs=None, fp=None)]),
            ):
                # ttl_seconds=0 forces a refetch attempt on the second call,
                # so the stale-cache fallback path is exercised.
                first = binance_history._fetch_json(url, ttl_seconds=0)
                second = binance_history._fetch_json(url, ttl_seconds=0)
        self.assertEqual(first, {"ok": True})
        self.assertEqual(second, {"ok": True})
|
||||
|
||||
    def test_soft_router_weights_remain_bounded(self) -> None:
        """Soft-router sleeve weights plus cash must always sum to exactly 1."""
        timestamps = list(pd.date_range("2025-01-01", periods=4, freq="4h", tz="UTC"))
        # Regime scores for the first three bars; the last bar has no score row.
        score_frame = pd.DataFrame(
            {
                "timestamp": timestamps[:-1],
                "core_score": [0.8, 0.2, 0.1],
                "panic_score": [0.0, 0.6, 0.1],
                "choppy_score": [0.1, 0.7, 0.8],
                "distribution_score": [0.1, 0.2, 0.9],
            }
        )
        returns = pd.Series([0.01, -0.02, 0.03], index=pd.DatetimeIndex(timestamps[1:], name="timestamp"))
        curve, weights = compose_soft_router_curve(
            timestamps=timestamps,
            score_frame=score_frame,
            core_returns=returns,
            # Sleeve returns are scaled variants of the core series.
            cap_returns=returns * 0.5,
            chop_returns=returns * 0.25,
            dist_returns=returns * -0.10,
            candidate=SoftRouterCandidate(
                regime_profile="base",
                core_filter="overheat_tolerant",
                cap_engine="cap_btc_rebound",
                chop_engine="chop_inverse_carry",
                dist_engine="dist_inverse_carry_strict",
                core_floor=0.1,
                cap_max_weight=0.4,
                chop_max_weight=0.3,
                dist_max_weight=0.2,
                chop_blend_floor=0.15,
            ),
        )
        self.assertEqual(len(curve), 4)
        # The four sleeves plus cash must fully allocate capital on every bar.
        total_weights = weights[["core_weight", "cap_weight", "chop_weight", "dist_weight", "cash_weight"]].sum(axis=1)
        self.assertTrue(((total_weights - 1.0).abs() < 1e-9).all())
|
||||
|
||||
    def test_cash_overlay_respects_core_cash_budget(self) -> None:
        """The overlay may never deploy more than the core strategy's idle cash."""
        timestamps = list(pd.date_range("2025-01-01", periods=4, freq="4h", tz="UTC"))
        score_frame = pd.DataFrame(
            {
                "timestamp": timestamps[:-1],
                "core_score": [0.2, 0.8, 0.1],
                "panic_score": [0.8, 0.1, 0.0],
                "choppy_score": [0.6, 0.7, 0.2],
                "distribution_score": [0.2, 0.9, 0.8],
            }
        )
        core_returns = pd.Series([0.01, 0.00, 0.02], index=pd.DatetimeIndex(timestamps[1:], name="timestamp"))
        overlay_returns = pd.Series([0.02, 0.01, -0.01], index=pd.DatetimeIndex(timestamps[1:], name="timestamp"))
        # Per-bar idle-cash fraction reported by the core strategy.
        core_exposure_frame = pd.DataFrame(
            {
                "timestamp": timestamps[:-1],
                "cash_pct": [0.50, 0.30, 0.80],
            }
        )
        curve, weights = compose_cash_overlay_curve(
            timestamps=timestamps,
            score_frame=score_frame,
            core_returns=core_returns,
            core_exposure_frame=core_exposure_frame,
            cap_returns=overlay_returns,
            chop_returns=overlay_returns,
            dist_returns=overlay_returns,
            candidate=CashOverlayCandidate(
                regime_profile="loose_positive",
                core_filter="overheat_tolerant",
                cap_engine="cap_btc_rebound",
                chop_engine="chop_inverse_carry_strict",
                dist_engine="dist_inverse_carry_strict",
                # 0.80 sleeve weights would overshoot the cash budget if uncapped.
                cap_cash_weight=0.80,
                chop_cash_weight=0.80,
                dist_cash_weight=0.80,
                cap_threshold=0.20,
                chop_threshold=0.20,
                dist_threshold=0.20,
                core_block_threshold=0.50,
            ),
        )
        self.assertEqual(len(curve), 4)
        # Invariant under test: overlay deployment <= core idle cash on every bar.
        self.assertTrue((weights["overlay_total"] <= weights["core_cash_pct"] + 1e-9).all())
|
||||
|
||||
    def test_cash_overlay_macro_scale_reduces_core_return_and_frees_cash(self) -> None:
        """A 0.5 macro scale should halve core returns and free the other half as cash."""
        timestamps = list(pd.date_range("2025-01-01", periods=4, freq="4h", tz="UTC"))
        # All regime scores are flat/low so no overlay sleeve activates.
        score_frame = pd.DataFrame(
            {
                "timestamp": timestamps[:-1],
                "core_score": [0.1, 0.1, 0.1],
                "panic_score": [0.0, 0.0, 0.0],
                "choppy_score": [0.0, 0.0, 0.0],
                "distribution_score": [0.0, 0.0, 0.0],
            }
        )
        # Constant 10% core return per bar; macro scaling should halve it to 5%.
        core_returns = pd.Series([0.10, 0.10, 0.10], index=pd.DatetimeIndex(timestamps[1:], name="timestamp"))
        core_exposure_frame = pd.DataFrame(
            {
                "timestamp": timestamps[:-1],
                "cash_pct": [0.20, 0.20, 0.20],
            }
        )
        curve, weights = compose_cash_overlay_curve(
            timestamps=timestamps,
            score_frame=score_frame,
            core_returns=core_returns,
            core_exposure_frame=core_exposure_frame,
            cap_returns=core_returns * 0.0,
            chop_returns=core_returns * 0.0,
            dist_returns=core_returns * 0.0,
            candidate=CashOverlayCandidate(
                regime_profile="loose_positive",
                core_filter="overheat_tolerant",
                cap_engine="cap_btc_rebound",
                chop_engine="chop_inverse_carry_strict",
                dist_engine="dist_inverse_carry_strict",
                cap_cash_weight=0.0,
                chop_cash_weight=0.0,
                dist_cash_weight=0.0,
                cap_threshold=0.20,
                chop_threshold=0.20,
                dist_threshold=0.20,
                core_block_threshold=0.50,
            ),
            macro_scale_map=pd.Series(
                [0.50, 0.50, 0.50],
                index=pd.DatetimeIndex(timestamps[:-1], name="timestamp"),
                dtype=float,
            ),
        )
        self.assertAlmostEqual(float(weights["macro_scale"].iloc[0]), 0.50)
        # 20% base cash + 50% of the remaining 80% exposure = 60% cash.
        self.assertAlmostEqual(float(weights["core_cash_pct"].iloc[0]), 0.60)
        # Halved 10% return compounds as 5% over three bars.
        self.assertAlmostEqual(float(curve.iloc[-1]), 1000.0 * (1.05 ** 3), places=6)
|
||||
|
||||
    def test_expand_core_targets_adds_btc_hedge(self) -> None:
        """A momentum target with a hedge_ratio must spawn a short BTC perp leg."""
        targets = _expand_core_targets(
            [
                {
                    "engine": "momentum",
                    "symbol": "ETH",
                    "value": 250.0,
                    # 0.4 hedge on a $250 leg => $100 short BTC = -0.10 of $1000 equity.
                    "meta": {"hedge_ratio": 0.4},
                },
                {
                    "engine": "carry",
                    "symbol": "SOL",
                    "value": 100.0,
                    "meta": {},
                },
            ],
            final_equity=1000.0,
        )
        by_instrument = {row["instrument"]: row for row in targets}
        # $250 long ETH of $1000 equity => 0.25 weight.
        self.assertAlmostEqual(float(by_instrument["perp:ETH"]["weight"]), 0.25)
        self.assertAlmostEqual(float(by_instrument["perp:BTC"]["weight"]), -0.10)
        # Carry legs are surfaced but flagged as not directly tradeable.
        self.assertFalse(bool(by_instrument["carry:SOL"]["tradeable"]))
|
||||
|
||||
    def test_overlay_signal_strengths_block_core_scores(self) -> None:
        """A high core_score should suppress chop/dist signals but not the cap signal."""
        signals = _overlay_signal_strengths(
            BEST_CASH_OVERLAY,
            {
                "core_score": 0.90,
                "panic_score": 0.50,
                "choppy_score": 0.80,
                "distribution_score": 0.90,
            },
        )
        self.assertGreater(signals["cap_signal"], 0.0)
        # Despite high choppy/distribution scores, the 0.90 core score caps these.
        self.assertLess(signals["chop_signal"], 0.25)
        self.assertLess(signals["dist_signal"], 0.35)
|
||||
|
||||
def test_combine_targets_aggregates_same_instrument(self) -> None:
|
||||
combined = _combine_targets(
|
||||
[{"instrument": "perp:BTC", "weight": -0.10, "tradeable": True, "source": "core", "note": "hedge"}],
|
||||
[{"instrument": "perp:BTC", "weight": -0.05, "tradeable": True, "source": "overlay", "note": "cap"}],
|
||||
equity=1000.0,
|
||||
)
|
||||
self.assertEqual(len(combined), 1)
|
||||
self.assertAlmostEqual(float(combined[0]["weight"]), -0.15)
|
||||
self.assertAlmostEqual(float(combined[0]["notional_usd"]), -150.0)
|
||||
|
||||
def test_completed_bar_time_aligns_to_4h(self) -> None:
|
||||
ts = pd.Timestamp("2026-03-16 09:17:00+00:00")
|
||||
self.assertEqual(_completed_bar_time(ts, "4h"), pd.Timestamp("2026-03-16 08:00:00+00:00"))
|
||||
|
||||
def test_heartbeat_slot_uses_half_hour_boundaries(self) -> None:
|
||||
self.assertEqual(_heartbeat_slot(pd.Timestamp("2026-03-16 09:17:00+00:00")), (2026, 3, 16, 9, 0))
|
||||
self.assertEqual(_heartbeat_slot(pd.Timestamp("2026-03-16 09:31:00+00:00")), (2026, 3, 16, 9, 30))
|
||||
|
||||
|
||||
# Allow running this test module directly (e.g. `python <this file>`).
if __name__ == "__main__":
    unittest.main()
|
||||
437
universe.py
Normal file
437
universe.py
Normal file
@@ -0,0 +1,437 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
|
||||
def select_dynamic_universe(
    prices: dict[str, pd.DataFrame],
    *,
    timestamp: pd.Timestamp,
    min_history_bars: int,
    lookback_bars: int,
    max_symbols: int,
    min_avg_dollar_volume: float,
    base_symbol: str = "BTC",
) -> list[str]:
    """Select the most liquid non-base symbols as of *timestamp*.

    A symbol qualifies when it has at least ``min_history_bars`` bars of
    history up to ``timestamp`` and its average dollar volume (close * volume)
    over the last ``lookback_bars`` bars is at least ``min_avg_dollar_volume``.
    Qualifying symbols are returned ordered by descending average dollar
    volume, truncated to ``max_symbols`` when that is positive (``0`` means
    no cap).  ``base_symbol`` is always excluded.
    """
    scored: list[tuple[float, str]] = []
    for symbol, frame in prices.items():
        if symbol == base_symbol:
            continue
        history = frame.loc[frame["timestamp"] <= timestamp]
        if len(history) < min_history_bars:
            continue
        window = history.tail(lookback_bars)
        if window.empty:
            continue
        liquidity = float((window["close"] * window["volume"]).mean())
        if liquidity >= min_avg_dollar_volume:
            scored.append((liquidity, symbol))
    # Most liquid first; (volume, symbol) tuples keep the sort deterministic.
    scored.sort(reverse=True)
    top = scored[:max_symbols] if max_symbols > 0 else scored
    return [symbol for _, symbol in top]
|
||||
|
||||
|
||||
def score_momentum_universe(
    prices: dict[str, pd.DataFrame],
    funding: dict[str, pd.DataFrame],
    *,
    btc_symbol: str,
    timestamp: pd.Timestamp,
    candidate_symbols: list[str],
    min_history_bars: int,
    liquidity_lookback_bars: int,
    short_lookback_bars: int,
    long_lookback_bars: int,
    overheat_funding_rate: float,
) -> pd.DataFrame:
    """Score candidates on liquidity, momentum relative to BTC, and funding.

    Returns a DataFrame with one row per scoreable candidate, sorted by
    descending ``score``.  The score is a weighted sum of percentile ranks
    (liquidity, volume stability, short return, relative strength short/long,
    low volatility, positive-bar ratio) minus a penalty for funding rates
    above ``overheat_funding_rate``.

    Fix vs. the previous revision: both empty-result paths now return the
    same full column schema (before, the "no qualifying candidate" path
    returned only ``symbol``/``score``), so callers can rely on column
    presence regardless of which guard fired.
    """
    empty_columns = [
        "symbol",
        "avg_dollar_volume",
        "volume_stability",
        "short_return",
        "relative_strength_short",
        "relative_strength_long",
        "beta",
        "positive_bar_ratio",
        "short_volatility",
        "low_volatility",
        "latest_funding_rate",
        "funding_penalty",
        "score",
    ]
    # Every symbol needs enough bars for the long window plus a small buffer.
    required_bars = max(min_history_bars, long_lookback_bars + 5)
    btc_hist = prices[btc_symbol].loc[prices[btc_symbol]["timestamp"] <= timestamp]
    if len(btc_hist) < required_bars:
        return pd.DataFrame(columns=empty_columns)
    btc_hist = btc_hist.tail(long_lookback_bars + 5)
    btc_close = btc_hist["close"]
    btc_returns = btc_close.pct_change().dropna()
    btc_ret_short = float(btc_close.iloc[-1] / btc_close.iloc[-short_lookback_bars] - 1.0)
    btc_ret_long = float(btc_close.iloc[-1] / btc_close.iloc[0] - 1.0)

    rows: list[dict[str, float | str]] = []
    for symbol in candidate_symbols:
        hist = prices[symbol].loc[prices[symbol]["timestamp"] <= timestamp]
        if len(hist) < required_bars:
            continue
        hist = hist.tail(long_lookback_bars + 5)
        recent = hist.tail(liquidity_lookback_bars)
        dollar_volume = recent["close"] * recent["volume"]
        returns = hist["close"].pct_change().dropna()
        if len(returns) < 12:
            continue
        # Beta vs BTC over the overlapping return window; fall back to 0.0
        # when the sample is too small or BTC variance is degenerate.
        overlap = min(len(returns), len(btc_returns), long_lookback_bars)
        alt_beta = returns.tail(overlap).to_numpy()
        btc_beta = btc_returns.tail(overlap).to_numpy()
        btc_var = float(np.var(btc_beta))
        beta = float(np.cov(alt_beta, btc_beta)[0, 1] / btc_var) if overlap >= 10 and btc_var > 1e-12 else 0.0
        short_returns = returns.tail(short_lookback_bars)
        # inf volatility on tiny samples guarantees rejection by any finite cap.
        short_volatility = float(short_returns.std(ddof=0)) if len(short_returns) >= 6 else float("inf")
        positive_bar_ratio = float((short_returns > 0).mean()) if len(short_returns) else 0.0
        short_return = float(hist["close"].iloc[-1] / hist["close"].iloc[-short_lookback_bars] - 1.0)
        long_return = float(hist["close"].iloc[-1] / hist["close"].iloc[0] - 1.0)
        latest_funding = 0.0
        if symbol in funding:
            f_hist = funding[symbol].loc[funding[symbol]["timestamp"] <= timestamp]
            if not f_hist.empty:
                latest_funding = float(f_hist["funding_rate"].iloc[-1])
        rows.append(
            {
                "symbol": symbol,
                "avg_dollar_volume": float(dollar_volume.mean()),
                # Inverse coefficient of variation of dollar volume.
                "volume_stability": float(1.0 / ((dollar_volume.std(ddof=0) / (dollar_volume.mean() + 1e-9)) + 1e-9)),
                "short_return": short_return,
                "relative_strength_short": short_return - btc_ret_short,
                "relative_strength_long": long_return - btc_ret_long,
                "beta": beta,
                "positive_bar_ratio": positive_bar_ratio,
                "short_volatility": short_volatility,
                "low_volatility": float(1.0 / (returns.tail(short_lookback_bars).std(ddof=0) + 1e-9)),
                "latest_funding_rate": latest_funding,
            }
        )
    if not rows:
        # Same schema as the short-BTC-history early return (the fix).
        return pd.DataFrame(columns=empty_columns)
    frame = pd.DataFrame(rows)
    for column in (
        "avg_dollar_volume",
        "volume_stability",
        "short_return",
        "relative_strength_short",
        "relative_strength_long",
        "positive_bar_ratio",
        "low_volatility",
    ):
        frame[f"{column}_rank"] = frame[column].rank(pct=True)
    # Linear penalty for funding above the overheat threshold, capped at 1.5x.
    frame["funding_penalty"] = (
        (frame["latest_funding_rate"] - overheat_funding_rate).clip(lower=0.0) / max(overheat_funding_rate, 1e-9)
    ).clip(upper=1.5)
    frame["score"] = (
        0.15 * frame["avg_dollar_volume_rank"]
        + 0.10 * frame["volume_stability_rank"]
        + 0.15 * frame["short_return_rank"]
        + 0.25 * frame["relative_strength_short_rank"]
        + 0.20 * frame["relative_strength_long_rank"]
        + 0.10 * frame["low_volatility_rank"]
        + 0.05 * frame["positive_bar_ratio_rank"]
        - 0.10 * frame["funding_penalty"]
    )
    return frame.sort_values("score", ascending=False).reset_index(drop=True)
|
||||
|
||||
|
||||
def filter_momentum_frame(
|
||||
frame: pd.DataFrame,
|
||||
*,
|
||||
min_score: float,
|
||||
min_relative_strength: float,
|
||||
min_7d_return: float,
|
||||
max_7d_return: float | None = None,
|
||||
min_positive_bar_ratio: float | None = None,
|
||||
max_short_volatility: float | None = None,
|
||||
max_latest_funding_rate: float | None = None,
|
||||
max_beta: float | None = None,
|
||||
) -> pd.DataFrame:
|
||||
if frame.empty:
|
||||
return frame
|
||||
mask = (
|
||||
(frame["score"] >= min_score)
|
||||
& (frame["relative_strength_short"] >= min_relative_strength)
|
||||
& (frame["relative_strength_long"] >= min_relative_strength)
|
||||
& (frame["short_return"] >= min_7d_return)
|
||||
)
|
||||
if max_7d_return is not None:
|
||||
mask &= frame["short_return"] <= max_7d_return
|
||||
if min_positive_bar_ratio is not None:
|
||||
mask &= frame["positive_bar_ratio"] >= min_positive_bar_ratio
|
||||
if max_short_volatility is not None:
|
||||
mask &= frame["short_volatility"] <= max_short_volatility
|
||||
if max_latest_funding_rate is not None:
|
||||
mask &= frame["latest_funding_rate"] <= max_latest_funding_rate
|
||||
if max_beta is not None:
|
||||
mask &= frame["beta"] <= max_beta
|
||||
return frame.loc[mask].sort_values("score", ascending=False).reset_index(drop=True)
|
||||
|
||||
|
||||
def rank_momentum_universe(
    prices: dict[str, pd.DataFrame],
    funding: dict[str, pd.DataFrame],
    *,
    btc_symbol: str,
    timestamp: pd.Timestamp,
    candidate_symbols: list[str],
    min_history_bars: int,
    liquidity_lookback_bars: int,
    short_lookback_bars: int,
    long_lookback_bars: int,
    overheat_funding_rate: float,
    max_symbols: int,
    max_7d_return: float | None = None,
    min_positive_bar_ratio: float | None = None,
    max_short_volatility: float | None = None,
    max_latest_funding_rate: float | None = None,
    max_beta: float | None = None,
) -> list[str]:
    """Score, quality-filter, and truncate momentum candidates.

    Only the optional caps are enforced here (the mandatory floors are passed
    as -inf).  When filtering leaves nothing, the caller's original candidate
    ordering is returned (truncated to ``max_symbols`` when positive) rather
    than an empty list.
    """
    scored = score_momentum_universe(
        prices,
        funding,
        btc_symbol=btc_symbol,
        timestamp=timestamp,
        candidate_symbols=candidate_symbols,
        min_history_bars=min_history_bars,
        liquidity_lookback_bars=liquidity_lookback_bars,
        short_lookback_bars=short_lookback_bars,
        long_lookback_bars=long_lookback_bars,
        overheat_funding_rate=overheat_funding_rate,
    )
    filtered = filter_momentum_frame(
        scored,
        min_score=float("-inf"),
        min_relative_strength=float("-inf"),
        min_7d_return=float("-inf"),
        max_7d_return=max_7d_return,
        min_positive_bar_ratio=min_positive_bar_ratio,
        max_short_volatility=max_short_volatility,
        max_latest_funding_rate=max_latest_funding_rate,
        max_beta=max_beta,
    )
    if filtered.empty:
        # Nothing survived the caps: fall back to the caller's ordering.
        return candidate_symbols[:max_symbols] if max_symbols > 0 else candidate_symbols
    top = filtered.head(max_symbols) if max_symbols > 0 else filtered
    return [str(symbol) for symbol in top["symbol"]]
|
||||
|
||||
|
||||
def score_carry_universe(
    prices: dict[str, pd.DataFrame],
    funding: dict[str, pd.DataFrame],
    *,
    timestamp: pd.Timestamp,
    candidate_symbols: list[str],
    lookback_bars: int,
    expected_horizon_bars: int,
    roundtrip_cost_pct: float,
    basis_risk_multiplier: float,
) -> pd.DataFrame:
    """Score candidates for the carry engine from funding/basis history.

    ``expected_edge`` = mean funding over the horizon, plus 35% of the latest
    positive basis, minus round-trip cost and a basis-volatility risk charge.
    The overall ``score`` is a weighted sum of percentile ranks.  Symbols
    without funding data or with fewer than ``lookback_bars`` funding rows up
    to ``timestamp`` are skipped; an empty frame carries the minimal schema.
    """
    records: list[dict[str, float | str]] = []
    for symbol in candidate_symbols:
        if symbol not in funding:
            continue
        fund = funding[symbol]
        fund_window = fund.loc[fund["timestamp"] <= timestamp].tail(lookback_bars)
        px = prices[symbol]
        px_window = px.loc[px["timestamp"] <= timestamp].tail(30)
        if len(fund_window) < lookback_bars or px_window.empty:
            continue
        rate_mean = float(fund_window["funding_rate"].mean())
        share_positive = float((fund_window["funding_rate"] > 0).mean())
        basis_vol = float(fund_window["basis"].std(ddof=0))
        last_basis = float(fund_window["basis"].iloc[-1])
        edge = (
            rate_mean * expected_horizon_bars
            + max(last_basis, 0.0) * 0.35
            - roundtrip_cost_pct
            - basis_vol * basis_risk_multiplier
        )
        records.append(
            {
                "symbol": symbol,
                "expected_edge": edge,
                "positive_ratio": share_positive,
                "mean_funding": rate_mean,
                "low_basis_volatility": float(1.0 / (basis_vol + 1e-9)),
                "avg_dollar_volume": float((px_window["close"] * px_window["volume"]).mean()),
            }
        )
    if not records:
        return pd.DataFrame(columns=["symbol", "score", "expected_edge"])
    frame = pd.DataFrame(records)
    for column in (
        "expected_edge",
        "positive_ratio",
        "mean_funding",
        "low_basis_volatility",
        "avg_dollar_volume",
    ):
        frame[f"{column}_rank"] = frame[column].rank(pct=True)
    # Weighted percentile-rank blend; edge quality dominates at 40%.
    rank_weights = {
        "expected_edge_rank": 0.40,
        "positive_ratio_rank": 0.20,
        "mean_funding_rank": 0.20,
        "low_basis_volatility_rank": 0.10,
        "avg_dollar_volume_rank": 0.10,
    }
    frame["score"] = sum(weight * frame[column] for column, weight in rank_weights.items())
    return frame.sort_values("score", ascending=False).reset_index(drop=True)
|
||||
|
||||
|
||||
def rank_carry_universe(
    prices: dict[str, pd.DataFrame],
    funding: dict[str, pd.DataFrame],
    *,
    timestamp: pd.Timestamp,
    candidate_symbols: list[str],
    lookback_bars: int,
    expected_horizon_bars: int,
    roundtrip_cost_pct: float,
    basis_risk_multiplier: float,
    max_symbols: int,
) -> list[str]:
    """Return carry candidates ordered by score, truncated to ``max_symbols``.

    Unlike the momentum ranker, there is no fallback: an empty scored frame
    yields an empty list.  ``max_symbols <= 0`` means no cap.
    """
    scored = score_carry_universe(
        prices,
        funding,
        timestamp=timestamp,
        candidate_symbols=candidate_symbols,
        lookback_bars=lookback_bars,
        expected_horizon_bars=expected_horizon_bars,
        roundtrip_cost_pct=roundtrip_cost_pct,
        basis_risk_multiplier=basis_risk_multiplier,
    )
    if scored.empty:
        return []
    top = scored.head(max_symbols) if max_symbols > 0 else scored
    return [str(symbol) for symbol in top["symbol"]]
|
||||
|
||||
|
||||
def limit_correlated_symbols(
    prices: dict[str, pd.DataFrame],
    *,
    timestamp: pd.Timestamp,
    candidate_symbols: list[str],
    lookback_bars: int,
    max_pairwise_correlation: float,
    max_symbols: int,
) -> list[str]:
    """Greedily keep candidates whose return paths are not near-duplicates.

    Candidates are visited in the given order; each one is kept only when its
    return correlation with every previously kept symbol stays below
    ``max_pairwise_correlation`` (pairs with fewer than 10 overlapping returns
    are treated as uncorrelated).  Symbols with too few returns are skipped.
    Selection stops early once ``max_symbols`` (when positive) is reached.
    """
    min_samples = max(10, lookback_bars // 3)
    kept: list[str] = []
    kept_returns: dict[str, np.ndarray] = {}
    for symbol in candidate_symbols:
        frame = prices[symbol]
        window = frame.loc[frame["timestamp"] <= timestamp].tail(lookback_bars + 1)
        candidate_rets = window["close"].pct_change().dropna().to_numpy()
        if len(candidate_rets) < min_samples:
            continue
        redundant = False
        for prior in kept:
            prior_rets = kept_returns[prior]
            overlap = min(len(candidate_rets), len(prior_rets))
            corr = 0.0
            if overlap >= 10:
                corr = float(np.corrcoef(candidate_rets[-overlap:], prior_rets[-overlap:])[0, 1])
            # NaN correlations (e.g. zero-variance series) never block a symbol.
            if np.isfinite(corr) and corr >= max_pairwise_correlation:
                redundant = True
                break
        if redundant:
            continue
        kept.append(symbol)
        kept_returns[symbol] = candidate_rets
        if 0 < max_symbols <= len(kept):
            break
    return kept
|
||||
|
||||
|
||||
def select_strategic_universe(
    prices: dict[str, pd.DataFrame],
    funding: dict[str, pd.DataFrame],
    *,
    btc_symbol: str,
    timestamp: pd.Timestamp,
    min_history_bars: int,
    lookback_bars: int,
    min_avg_dollar_volume: float,
    short_lookback_bars: int,
    long_lookback_bars: int,
    overheat_funding_rate: float,
    carry_lookback_bars: int,
    carry_expected_horizon_bars: int,
    carry_roundtrip_cost_pct: float,
    carry_basis_risk_multiplier: float,
    momentum_min_score: float,
    momentum_min_relative_strength: float,
    momentum_min_7d_return: float,
    momentum_max_7d_return: float | None,
    momentum_min_positive_bar_ratio: float | None,
    momentum_max_short_volatility: float | None,
    momentum_max_latest_funding_rate: float | None,
    momentum_max_beta: float | None,
    carry_min_expected_edge: float,
    max_symbols: int = 0,
) -> list[str]:
    """Build the combined (momentum OR carry) strategic universe.

    Pipeline: (1) liquidity-screen all symbols via select_dynamic_universe,
    (2) score + hard-filter the survivors for momentum, (3) score them for
    carry, then (4) union both sets, keeping each symbol's best score as the
    ranking key.  Returns symbols ordered by that best score, truncated to
    ``max_symbols`` when positive; empty list when nothing qualifies.
    """
    # Step 1: liquidity gate (no cap here; truncation happens at the end).
    liquid_symbols = select_dynamic_universe(
        prices,
        timestamp=timestamp,
        min_history_bars=min_history_bars,
        lookback_bars=lookback_bars,
        max_symbols=0,
        min_avg_dollar_volume=min_avg_dollar_volume,
        base_symbol=btc_symbol,
    )
    # Step 2: momentum scoring + quality thresholds.
    momentum_frame = score_momentum_universe(
        prices,
        funding,
        btc_symbol=btc_symbol,
        timestamp=timestamp,
        candidate_symbols=liquid_symbols,
        min_history_bars=min_history_bars,
        liquidity_lookback_bars=lookback_bars,
        short_lookback_bars=short_lookback_bars,
        long_lookback_bars=long_lookback_bars,
        overheat_funding_rate=overheat_funding_rate,
    )
    momentum_frame = filter_momentum_frame(
        momentum_frame,
        min_score=momentum_min_score,
        min_relative_strength=momentum_min_relative_strength,
        min_7d_return=momentum_min_7d_return,
        max_7d_return=momentum_max_7d_return,
        min_positive_bar_ratio=momentum_min_positive_bar_ratio,
        max_short_volatility=momentum_max_short_volatility,
        max_latest_funding_rate=momentum_max_latest_funding_rate,
        max_beta=momentum_max_beta,
    )
    # Step 3: carry scoring over the same liquid set.
    carry_frame = score_carry_universe(
        prices,
        funding,
        timestamp=timestamp,
        candidate_symbols=liquid_symbols,
        lookback_bars=carry_lookback_bars,
        expected_horizon_bars=carry_expected_horizon_bars,
        roundtrip_cost_pct=carry_roundtrip_cost_pct,
        basis_risk_multiplier=carry_basis_risk_multiplier,
    )

    # Step 4: union of both engines, keyed by each symbol's best score.
    ranked_symbols: dict[str, float] = {}
    if not momentum_frame.empty:
        for row in momentum_frame.itertuples(index=False):
            ranked_symbols[str(row.symbol)] = max(ranked_symbols.get(str(row.symbol), float("-inf")), float(row.score))

    if not carry_frame.empty:
        for row in carry_frame.itertuples(index=False):
            # Carry symbols must clear the minimum expected-edge hurdle.
            if float(row.expected_edge) <= carry_min_expected_edge:
                continue
            ranked_symbols[str(row.symbol)] = max(ranked_symbols.get(str(row.symbol), float("-inf")), float(row.score))

    if not ranked_symbols:
        return []

    ranked = sorted(ranked_symbols.items(), key=lambda item: item[1], reverse=True)
    if max_symbols > 0:
        ranked = ranked[:max_symbols]
    return [symbol for symbol, _ in ranked]
|
||||
Reference in New Issue
Block a user