Compare commits


2 Commits

SHA1        Message                                       Date
86a793c561  Narrow risk overlay frontier search           2026-03-17 16:39:11 -07:00
88c7c7790d  Add blocker research and routing rename       2026-03-17 16:16:27 -07:00
20 changed files with 3438 additions and 3 deletions

View File

@@ -14,7 +14,7 @@ from strategy29.portfolio.manager import PortfolioManager
 from strategy29.portfolio.risk_limits import carry_stop_triggered, momentum_stop_triggered
 from strategy29.signal.btc_regime import BTCRegimeEngine
 from strategy32.config import Strategy32Config, build_engine_config
-from strategy32.signal.router import Strategy32Router
+from strategy32.routing.router import Strategy32Router
 from strategy32.universe import (
     filter_momentum_frame,
     limit_correlated_symbols,

data.py (18 changed lines)
View File

@@ -116,7 +116,23 @@ def build_strategy32_price_frames_from_specs(
         if symbol_end < staleness_cutoff:
             rejected.append(symbol)
             continue
-        prices[symbol] = perp[["timestamp", "open", "high", "low", "close", "volume"]].copy()
+        keep_columns = [
+            column
+            for column in (
+                "timestamp",
+                "open",
+                "high",
+                "low",
+                "close",
+                "volume",
+                "quote_volume",
+                "trade_count",
+                "taker_base",
+                "taker_quote",
+            )
+            if column in perp.columns
+        ]
+        prices[symbol] = perp[keep_columns].copy()
         latest_completed_bar = symbol_end if latest_completed_bar is None else min(latest_completed_bar, symbol_end)
         accepted.append(symbol)
         quote_by_symbol[symbol] = spec.quote_asset

View File

@@ -0,0 +1,107 @@
from __future__ import annotations
import json
import multiprocessing as mp
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from dataclasses import asdict
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.research.soft_router import load_component_bundle, score_candidate
from strategy32.scripts.run_current_cash_learned_blocker import LearnedBlockerCandidate
from strategy32.scripts.run_current_cash_learned_blocker_exact import _exact_period_worker
from strategy32.scripts.run_current_relaxed_hybrid_experiment import WINDOWS, YEAR_PERIODS, YTD_START
OUT_JSON = Path("/tmp/strategy32_current_cash_blocked_scale_exact_sweep.json")
BASELINE_JSON = Path("/tmp/strategy32_live_combo_backtest.json")
CACHE_PATH = "/tmp/strategy32_fixed66_bundle.pkl"
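# Sweep only the blocked_scale knob of the learned blocker; the other
# hyperparameters are pinned (block_bars=42, train_min_blocks=12,
# lookback_blocks=24, alpha=1.0, threshold=-0.0025) and every candidate is
# scored with the exact per-period workers run in the pool below.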
def _candidate_space() -> list[LearnedBlockerCandidate]:
scales = (0.25, 0.40, 0.50, 0.60, 0.70, 0.75, 0.80, 0.90)
return [
LearnedBlockerCandidate(
block_bars=42,
train_min_blocks=12,
lookback_blocks=24,
ridge_alpha=1.0,
prediction_threshold=-0.0025,
blocked_scale=scale,
)
for scale in scales
]
def main() -> None:
_, latest_bar = load_component_bundle(CACHE_PATH)
period_specs: list[tuple[str, str, pd.Timestamp, pd.Timestamp]] = []
for days, label in WINDOWS:
period_specs.append(("window", label, latest_bar - pd.Timedelta(days=days), latest_bar))
for label, start, end_exclusive in YEAR_PERIODS:
period_specs.append(("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))))
period_specs.append(("year", "2026_YTD", YTD_START, latest_bar))
ctx = mp.get_context("fork")
results: list[dict[str, object]] = []
candidates = _candidate_space()
for idx, candidate in enumerate(candidates, start=1):
print(f"[candidate] {idx}/{len(candidates)} {candidate.name}", flush=True)
window_results: dict[str, dict[str, float]] = {}
year_results: dict[str, dict[str, float]] = {}
with ProcessPoolExecutor(max_workers=min(6, len(period_specs)), mp_context=ctx) as executor:
future_map = {
executor.submit(
_exact_period_worker,
CACHE_PATH,
candidate_payload=asdict(candidate),
kind=kind,
label=label,
start_text=str(start),
end_text=str(end),
): (kind, label)
for kind, label, start, end in period_specs
}
for future in as_completed(future_map):
kind, label, metrics = future.result()
if kind == "window":
window_results[label] = metrics
else:
year_results[label] = metrics
score, negative_years, mdd_violations = score_candidate(
{label: window_results[label] for _, label in WINDOWS},
{k: year_results[k] for k, _, _ in YEAR_PERIODS},
)
results.append(
{
"candidate": asdict(candidate),
"name": candidate.name,
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": {label: window_results[label] for _, label in WINDOWS},
"years": year_results,
}
)
results.sort(key=lambda item: float(item["score"]), reverse=True)
output = {
"analysis": "current_cash_blocked_scale_exact_sweep",
"latest_bar": str(latest_bar),
"results": results,
"baseline_exact": json.loads(BASELINE_JSON.read_text(encoding="utf-8")) if BASELINE_JSON.exists() else None,
}
OUT_JSON.write_text(json.dumps(output, indent=2), encoding="utf-8")
print(json.dumps(output["results"], indent=2))
print(f"[saved] {OUT_JSON}", flush=True)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,193 @@
from __future__ import annotations
import json
import sys
from dataclasses import asdict, dataclass
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.runtime import BEST_CASH_OVERLAY
from strategy32.research.soft_router import build_cash_overlay_period_components, load_component_bundle, score_candidate
from strategy32.scripts.run_current_cash_learned_blocker import (
CACHE_PATH,
CURRENT_OVERHEAT_OVERRIDES,
_build_block_dataset,
_build_regime_columns,
_build_strategy_detail,
_curve_from_returns,
_feature_columns,
_metrics_for_curve,
_oracle_blocker_curve,
_ridge_predict,
)
OUT_JSON = Path("/tmp/strategy32_current_cash_classifier_blocker.json")
@dataclass(frozen=True, slots=True)
class ClassifierBlockerCandidate:
block_bars: int
train_min_blocks: int
lookback_blocks: int
ridge_alpha: float
probability_threshold: float
blocked_scale: float
@property
def name(self) -> str:
return (
f"block:{self.block_bars}"
f"|train:{self.train_min_blocks}"
f"|lookback:{self.lookback_blocks}"
f"|alpha:{self.ridge_alpha:.2f}"
f"|pth:{self.probability_threshold:.2f}"
f"|blocked:{self.blocked_scale:.2f}"
)
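# Walk-forward classifier variant: each training block is labelled 1.0 when its
# compounded return was positive, a ridge fit on prior blocks scores the current
# block, and exposure drops to blocked_scale when that score falls below
# probability_threshold. The ridge output is an unbounded regression score, not
# a calibrated probability.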
def _simulate_candidate(
detail: pd.DataFrame,
block_frame: pd.DataFrame,
regime_columns: list[str],
candidate: ClassifierBlockerCandidate,
) -> pd.Series:
rows = detail.reset_index(drop=True)
features = _feature_columns(regime_columns)
returns: list[float] = []
idx: list[pd.Timestamp] = []
for block_idx, block in block_frame.iterrows():
start_idx = int(block["block_start_index"])
end_idx = int(block["block_end_index"])
bar_block = rows.iloc[start_idx : end_idx + 1]
exposure_scale = 1.0
if block_idx >= candidate.train_min_blocks:
train_start = max(0, block_idx - candidate.lookback_blocks)
train = block_frame.iloc[train_start:block_idx].copy()
train_x = train[features].to_numpy(dtype=float)
train_y = (train["block_total"] > 0.0).astype(float).to_numpy(dtype=float)
test_x = block[features].to_numpy(dtype=float)
pred = _ridge_predict(train_x, train_y, test_x, candidate.ridge_alpha)
if pred < candidate.probability_threshold:
exposure_scale = candidate.blocked_scale
for row in bar_block.itertuples(index=False):
returns.append(float(getattr(row, "portfolio_return")) * exposure_scale)
idx.append(pd.Timestamp(getattr(row, "timestamp")))
return pd.Series(returns, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
def _candidate_space() -> list[ClassifierBlockerCandidate]:
space: list[ClassifierBlockerCandidate] = []
for block_bars in (21, 42):
for train_min_blocks in (8, 12, 18):
for lookback_blocks in (24, 60, 120):
if lookback_blocks < train_min_blocks:
continue
for ridge_alpha in (0.5, 1.0, 5.0, 20.0):
for probability_threshold in (0.45, 0.50, 0.55, 0.60):
for blocked_scale in (0.0, 0.25, 0.50):
space.append(
ClassifierBlockerCandidate(
block_bars=block_bars,
train_min_blocks=train_min_blocks,
lookback_blocks=lookback_blocks,
ridge_alpha=ridge_alpha,
probability_threshold=probability_threshold,
blocked_scale=blocked_scale,
)
)
return space
def main() -> None:
bundle, latest_bar = load_component_bundle(CACHE_PATH)
eval_start = latest_bar - pd.Timedelta(days=1825)
print("[phase] build current baseline", flush=True)
components = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
)
detail = _build_strategy_detail(components)
regime_columns = _build_regime_columns(detail)
baseline_curve = _curve_from_returns(detail.set_index("timestamp")["portfolio_return"])
baseline_windows, baseline_years, baseline_score, baseline_negative_years, baseline_mdd_violations = _metrics_for_curve(baseline_curve, latest_bar)
oracle_summary: dict[str, object] = {}
for block in (21, 42):
oracle_curve = _curve_from_returns(_oracle_blocker_curve(detail, block))
windows, years, score, negative_years, mdd_violations = _metrics_for_curve(oracle_curve, latest_bar)
oracle_summary[f"oracle_block_{block}"] = {
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": windows,
"years": years,
}
candidates = _candidate_space()
print(f"[phase] classifier blocker search {len(candidates)} candidates", flush=True)
best_payload: dict[str, object] | None = None
search_top: list[dict[str, object]] = []
for idx, candidate in enumerate(candidates, start=1):
block_frame = _build_block_dataset(detail, candidate.block_bars, regime_columns)
simulated_returns = _simulate_candidate(detail, block_frame, regime_columns, candidate)
curve = _curve_from_returns(simulated_returns)
windows, years, score, negative_years, mdd_violations = _metrics_for_curve(curve, latest_bar)
payload = {
"candidate": asdict(candidate),
"name": candidate.name,
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": windows,
"years": years,
}
search_top.append(payload)
search_top.sort(key=lambda item: float(item["score"]), reverse=True)
search_top = search_top[:5]
if best_payload is None or score > float(best_payload["score"]):
best_payload = payload
if idx % max(1, len(candidates) // 8) == 0:
print(f"[search] {idx}/{len(candidates)}", flush=True)
assert best_payload is not None
output = {
"analysis": "current_cash_classifier_blocker",
"latest_bar": str(latest_bar),
**best_payload,
"baseline": {
"score": baseline_score,
"negative_years": baseline_negative_years,
"mdd_violations": baseline_mdd_violations,
"windows": baseline_windows,
"years": baseline_years,
},
"oracle": oracle_summary,
"search_top": search_top,
}
print(json.dumps(output, indent=2))
OUT_JSON.write_text(json.dumps(output, indent=2), encoding="utf-8")
print(f"[saved] {OUT_JSON}", flush=True)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,217 @@
from __future__ import annotations
import json
import sys
from dataclasses import asdict, dataclass
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.runtime import BEST_CASH_OVERLAY
from strategy32.research.soft_router import build_cash_overlay_period_components, load_component_bundle
from strategy32.scripts.run_current_cash_learned_blocker import (
CACHE_PATH,
CURRENT_OVERHEAT_OVERRIDES,
LearnedBlockerCandidate,
_build_block_dataset,
_build_regime_columns,
_build_strategy_detail,
_curve_from_returns,
_metrics_for_curve,
_ridge_predict,
)
OUT_JSON = Path("/tmp/strategy32_current_cash_conditional_blocker.json")
@dataclass(frozen=True, slots=True)
class ConditionalBlockerCandidate:
blocker: LearnedBlockerCandidate
max_trailing_total_21: float
min_choppy_score: float
min_distribution_score: float
min_cash_pct: float
@property
def name(self) -> str:
return (
f"{self.blocker.name}"
f"|trail<={self.max_trailing_total_21:.3f}"
f"|chop>={self.min_choppy_score:.2f}"
f"|dist>={self.min_distribution_score:.2f}"
f"|cash>={self.min_cash_pct:.2f}"
)
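# Conditional gate on the trigger-bar state: blocking is only allowed when the
# trailing 21-bar total return is at or below the cap AND at least one stress
# signal fires (choppy score, distribution score, or core cash percentage at or
# above its threshold).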
def _conditional_active(block: pd.Series, candidate: ConditionalBlockerCandidate) -> bool:
return (
float(block["trailing_total_21"]) <= candidate.max_trailing_total_21
and (
float(block["choppy_score"]) >= candidate.min_choppy_score
or float(block["distribution_score"]) >= candidate.min_distribution_score
or float(block["cash_pct"]) >= candidate.min_cash_pct
)
)
def _simulate_conditional_candidate(
detail: pd.DataFrame,
block_frame: pd.DataFrame,
regime_columns: list[str],
candidate: ConditionalBlockerCandidate,
) -> pd.Series:
rows = detail.reset_index(drop=True)
features = [
"core_score",
"breadth_persist",
"funding_persist",
"taker_persist",
"volume_accel_persist",
"mean_taker_imbalance",
"taker_imbalance_dispersion",
"positive_taker_ratio",
"mean_alt_volume_accel",
"positive_volume_accel_ratio",
"funding_dispersion",
"basis_dispersion",
"alt_return_dispersion_7d",
"mean_funding_acceleration",
"mean_basis_trend",
"panic_score",
"choppy_score",
"distribution_score",
"cash_pct",
"invested_pct",
"trailing_total_21",
"trailing_total_42",
"trailing_core_score_21",
"trailing_breadth_21",
"trailing_choppy_21",
*regime_columns,
]
returns: list[float] = []
idx: list[pd.Timestamp] = []
blocker = candidate.blocker
for block_idx, block in block_frame.iterrows():
start_idx = int(block["block_start_index"])
end_idx = int(block["block_end_index"])
bar_block = rows.iloc[start_idx : end_idx + 1]
exposure_scale = 1.0
if block_idx >= blocker.train_min_blocks:
train_start = max(0, block_idx - blocker.lookback_blocks)
train = block_frame.iloc[train_start:block_idx]
train_x = train[features].to_numpy(dtype=float)
train_y = train["block_total"].to_numpy(dtype=float)
test_x = block[features].to_numpy(dtype=float)
pred = _ridge_predict(train_x, train_y, test_x, blocker.ridge_alpha)
if pred <= blocker.prediction_threshold and _conditional_active(block, candidate):
exposure_scale = blocker.blocked_scale
for row in bar_block.itertuples(index=False):
returns.append(float(getattr(row, "portfolio_return")) * exposure_scale)
idx.append(pd.Timestamp(getattr(row, "timestamp")))
return pd.Series(returns, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
def _candidate_space() -> list[ConditionalBlockerCandidate]:
candidates: list[ConditionalBlockerCandidate] = []
for blocked_scale in (0.0, 0.25):
blocker = LearnedBlockerCandidate(
block_bars=42,
train_min_blocks=12,
lookback_blocks=24,
ridge_alpha=1.0,
prediction_threshold=-0.0025,
blocked_scale=blocked_scale,
)
for max_trailing_total_21 in (0.0, -0.01, -0.02):
for min_choppy_score in (0.20, 0.30, 0.40):
for min_distribution_score in (0.10, 0.20, 0.30):
for min_cash_pct in (0.20, 0.40, 0.60):
candidates.append(
ConditionalBlockerCandidate(
blocker=blocker,
max_trailing_total_21=max_trailing_total_21,
min_choppy_score=min_choppy_score,
min_distribution_score=min_distribution_score,
min_cash_pct=min_cash_pct,
)
)
return candidates
def main() -> None:
bundle, latest_bar = load_component_bundle(CACHE_PATH)
eval_start = latest_bar - pd.Timedelta(days=1825)
print("[phase] build current baseline", flush=True)
components = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
)
detail = _build_strategy_detail(components)
regime_columns = _build_regime_columns(detail)
block_frame = _build_block_dataset(detail, 42, regime_columns)
baseline_curve = _curve_from_returns(detail.set_index("timestamp")["portfolio_return"])
baseline_windows, baseline_years, baseline_score, *_ = _metrics_for_curve(baseline_curve, latest_bar)
top: list[dict[str, object]] = []
candidates = _candidate_space()
print(f"[phase] conditional blocker search {len(candidates)} candidates", flush=True)
for idx, candidate in enumerate(candidates, start=1):
sim_returns = _simulate_conditional_candidate(detail, block_frame, regime_columns, candidate)
curve = _curve_from_returns(sim_returns)
windows, years, score, negative_years, mdd_violations = _metrics_for_curve(curve, latest_bar)
payload = {
"candidate": {
**asdict(candidate.blocker),
"max_trailing_total_21": candidate.max_trailing_total_21,
"min_choppy_score": candidate.min_choppy_score,
"min_distribution_score": candidate.min_distribution_score,
"min_cash_pct": candidate.min_cash_pct,
},
"name": candidate.name,
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": windows,
"years": years,
}
top.append(payload)
top.sort(key=lambda item: float(item["score"]), reverse=True)
top = top[:10]
if idx % 50 == 0 or idx == len(candidates):
print(f"[search] {idx}/{len(candidates)}", flush=True)
output = {
"analysis": "current_cash_conditional_blocker",
"latest_bar": str(latest_bar),
"baseline": {
"score": baseline_score,
"windows": baseline_windows,
"years": baseline_years,
},
"top10": top,
}
OUT_JSON.write_text(json.dumps(output, indent=2), encoding="utf-8")
print(json.dumps(top[:5], indent=2))
print(f"[saved] {OUT_JSON}", flush=True)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,205 @@
from __future__ import annotations
import json
import sys
from dataclasses import asdict, dataclass
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.runtime import BEST_CASH_OVERLAY
from strategy32.research.soft_router import build_cash_overlay_period_components, load_component_bundle
from strategy32.scripts.run_current_cash_learned_blocker import (
CACHE_PATH,
CURRENT_OVERHEAT_OVERRIDES,
LearnedBlockerCandidate,
_build_block_dataset,
_build_regime_columns,
_build_strategy_detail,
_curve_from_returns,
_metrics_for_curve,
_simulate_candidate,
)
OUT_JSON = Path("/tmp/strategy32_current_cash_ensemble_search.json")
@dataclass(frozen=True, slots=True)
class StaticBlendCandidate:
blocker: LearnedBlockerCandidate
baseline_weight: float
@property
def name(self) -> str:
return f"static|bw:{self.baseline_weight:.2f}|{self.blocker.name}"
@dataclass(frozen=True, slots=True)
class RelativeBlendCandidate:
blocker: LearnedBlockerCandidate
lookback_bars: int
threshold: float
baseline_weight_high: float
baseline_weight_mid: float
baseline_weight_low: float
@property
def name(self) -> str:
return (
f"relative|lb:{self.lookback_bars}|th:{self.threshold:.3f}"
f"|hi:{self.baseline_weight_high:.2f}|mid:{self.baseline_weight_mid:.2f}|lo:{self.baseline_weight_low:.2f}"
f"|{self.blocker.name}"
)
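# Local scoring variant: emphasizes recent-window returns plus the 2024/2025/YTD
# totals, and penalizes negative calendar years and any lookback window whose
# max drawdown is worse than -20%.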
def _segment_score(windows: dict[str, dict[str, float]], years: dict[str, dict[str, float]]) -> tuple[float, int, int]:
negative_years = sum(1 for year in ("2021", "2022", "2023", "2024", "2025") if years[year]["total_return"] < 0.0)
mdd_violations = sum(1 for label in ("1y", "2y", "3y", "4y", "5y") if windows[label]["max_drawdown"] < -0.20)
score = 0.0
score += 4.0 * windows["1y"]["total_return"]
score += 2.0 * windows["2y"]["annualized_return"]
score += 1.5 * windows["3y"]["annualized_return"]
score += 2.5 * windows["5y"]["annualized_return"]
score += 0.75 * years["2025"]["total_return"]
score += 0.50 * years["2024"]["total_return"]
score += 0.20 * years["2026_YTD"]["total_return"]
score += 0.25 * max(0.0, -0.15 - windows["5y"]["max_drawdown"])
score -= 1.5 * negative_years
score -= 0.4 * mdd_violations
return score, negative_years, mdd_violations
def _compute_metrics(returns: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
curve = _curve_from_returns(returns)
windows, years, _, _, _ = _metrics_for_curve(curve, latest_bar)
score, negative_years, mdd_violations = _segment_score(windows, years)
return windows, years, score, negative_years, mdd_violations
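# Two blending modes: the static blend mixes baseline and blocker return streams
# with a constant baseline weight, while the relative blend switches that weight
# between high/mid/low levels based on the trailing-return gap between the two
# legs (computed with a one-bar shift, so the weight uses only past data).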
def _static_blend_returns(
baseline_returns: pd.Series,
blocker_returns: pd.Series,
candidate: StaticBlendCandidate,
) -> pd.Series:
bw = candidate.baseline_weight
return baseline_returns.mul(bw).add(blocker_returns.mul(1.0 - bw), fill_value=0.0)
def _relative_blend_returns(
baseline_returns: pd.Series,
blocker_returns: pd.Series,
candidate: RelativeBlendCandidate,
) -> pd.Series:
baseline_trailing = baseline_returns.shift(1).rolling(candidate.lookback_bars, min_periods=max(6, candidate.lookback_bars // 3)).sum()
blocker_trailing = blocker_returns.shift(1).rolling(candidate.lookback_bars, min_periods=max(6, candidate.lookback_bars // 3)).sum()
diff = baseline_trailing.sub(blocker_trailing, fill_value=0.0).fillna(0.0)
weights = pd.Series(candidate.baseline_weight_mid, index=baseline_returns.index, dtype=float)
weights = weights.mask(diff >= candidate.threshold, candidate.baseline_weight_high)
weights = weights.mask(diff <= -candidate.threshold, candidate.baseline_weight_low)
return baseline_returns.mul(weights).add(blocker_returns.mul(1.0 - weights), fill_value=0.0)
def main() -> None:
bundle, latest_bar = load_component_bundle(CACHE_PATH)
eval_start = latest_bar - pd.Timedelta(days=1825)
print("[phase] build baseline/detail", flush=True)
components = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
)
detail = _build_strategy_detail(components)
regime_columns = _build_regime_columns(detail)
baseline_returns = detail.set_index("timestamp")["portfolio_return"].astype(float)
blocker_candidates = [
LearnedBlockerCandidate(42, 12, 24, 1.0, -0.0025, 0.25),
LearnedBlockerCandidate(42, 12, 24, 1.0, -0.0025, 0.50),
]
blocker_returns_map: dict[str, pd.Series] = {}
block_frame_cache: dict[int, pd.DataFrame] = {}
for blocker in blocker_candidates:
if blocker.block_bars not in block_frame_cache:
block_frame_cache[blocker.block_bars] = _build_block_dataset(detail, blocker.block_bars, regime_columns)
blocker_returns_map[blocker.name] = _simulate_candidate(detail, block_frame_cache[blocker.block_bars], regime_columns, blocker)
baseline_windows, baseline_years, baseline_score, baseline_negative_years, baseline_mdd_violations = _compute_metrics(baseline_returns, latest_bar)
static_candidates = [
StaticBlendCandidate(blocker=blocker, baseline_weight=weight)
for blocker in blocker_candidates
for weight in (0.50, 0.60, 0.70, 0.75, 0.80, 0.85, 0.90)
]
relative_candidates = [
RelativeBlendCandidate(
blocker=blocker,
lookback_bars=lookback,
threshold=threshold,
baseline_weight_high=high,
baseline_weight_mid=mid,
baseline_weight_low=low,
)
for blocker in blocker_candidates
for lookback in (21, 42, 63)
for threshold in (0.0, 0.01, 0.02)
for high, mid, low in (
(0.90, 0.75, 0.55),
(0.85, 0.70, 0.50),
(0.80, 0.65, 0.45),
)
]
top: list[dict[str, object]] = []
all_candidates: list[object] = [*static_candidates, *relative_candidates]
print(f"[phase] ensemble search {len(all_candidates)} candidates", flush=True)
for idx, candidate in enumerate(all_candidates, start=1):
if isinstance(candidate, StaticBlendCandidate):
returns = _static_blend_returns(baseline_returns, blocker_returns_map[candidate.blocker.name], candidate)
else:
returns = _relative_blend_returns(baseline_returns, blocker_returns_map[candidate.blocker.name], candidate)
windows, years, score, negative_years, mdd_violations = _compute_metrics(returns, latest_bar)
payload = {
"candidate": asdict(candidate),
"name": candidate.name,
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": windows,
"years": years,
}
top.append(payload)
top.sort(key=lambda item: float(item["score"]), reverse=True)
top = top[:12]
if idx % 25 == 0 or idx == len(all_candidates):
print(f"[search] {idx}/{len(all_candidates)}", flush=True)
output = {
"analysis": "current_cash_ensemble_search",
"latest_bar": str(latest_bar),
"baseline": {
"score": baseline_score,
"negative_years": baseline_negative_years,
"mdd_violations": baseline_mdd_violations,
"windows": baseline_windows,
"years": baseline_years,
},
"top12": top,
}
OUT_JSON.write_text(json.dumps(output, indent=2), encoding="utf-8")
print(json.dumps(top[:5], indent=2))
print(f"[saved] {OUT_JSON}", flush=True)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,241 @@
from __future__ import annotations
import json
import sys
from dataclasses import asdict, dataclass
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.runtime import BEST_CASH_OVERLAY
from strategy32.research.soft_router import build_cash_overlay_period_components, load_component_bundle
from strategy32.scripts.run_current_cash_learned_blocker import (
CACHE_PATH,
CURRENT_OVERHEAT_OVERRIDES,
LearnedBlockerCandidate,
_build_block_dataset,
_build_regime_columns,
_build_strategy_detail,
_curve_from_returns,
_metrics_for_curve,
_ridge_predict,
)
OUT_JSON = Path("/tmp/strategy32_current_cash_guarded_blocker.json")
@dataclass(frozen=True, slots=True)
class GuardedBlockerCandidate:
blocker: LearnedBlockerCandidate
guard_regime_mode: str
min_core_score: float
min_breadth_persist: float
min_funding_persist: float
max_choppy_score: float
max_distribution_score: float
max_panic_score: float
@property
def name(self) -> str:
return (
f"{self.blocker.name}"
f"|guard:{self.guard_regime_mode}"
f"|core:{self.min_core_score:.2f}"
f"|breadth:{self.min_breadth_persist:.2f}"
f"|funding:{self.min_funding_persist:.2f}"
f"|chop<={self.max_choppy_score:.2f}"
f"|dist<={self.max_distribution_score:.2f}"
f"|panic<={self.max_panic_score:.2f}"
)
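# Guard override: when the trigger bar sits in the allowed momentum regime with
# core score, breadth, and funding persistence above their floors and choppy,
# distribution, and panic scores below their caps, the blocker decision is
# ignored and full exposure is restored for that block.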
def _guard_active(block: pd.Series, candidate: GuardedBlockerCandidate) -> bool:
regime = str(block.get("strategic_regime", ""))
if candidate.guard_regime_mode == "momentum_only":
regime_ok = regime == "MOMENTUM_EXPANSION"
elif candidate.guard_regime_mode == "momentum_or_euphoric":
regime_ok = regime in {"MOMENTUM_EXPANSION", "EUPHORIC_BREAKOUT"}
else:
regime_ok = False
return (
regime_ok
and float(block["core_score"]) >= candidate.min_core_score
and float(block["breadth_persist"]) >= candidate.min_breadth_persist
and float(block["funding_persist"]) >= candidate.min_funding_persist
and float(block["choppy_score"]) <= candidate.max_choppy_score
and float(block["distribution_score"]) <= candidate.max_distribution_score
and float(block["panic_score"]) <= candidate.max_panic_score
)
def _simulate_guarded_candidate(
detail: pd.DataFrame,
block_frame: pd.DataFrame,
regime_columns: list[str],
candidate: GuardedBlockerCandidate,
) -> pd.Series:
rows = detail.reset_index(drop=True)
features = [
"core_score",
"breadth_persist",
"funding_persist",
"taker_persist",
"volume_accel_persist",
"mean_taker_imbalance",
"taker_imbalance_dispersion",
"positive_taker_ratio",
"mean_alt_volume_accel",
"positive_volume_accel_ratio",
"funding_dispersion",
"basis_dispersion",
"alt_return_dispersion_7d",
"mean_funding_acceleration",
"mean_basis_trend",
"panic_score",
"choppy_score",
"distribution_score",
"cash_pct",
"invested_pct",
"trailing_total_21",
"trailing_total_42",
"trailing_core_score_21",
"trailing_breadth_21",
"trailing_choppy_21",
*regime_columns,
]
returns: list[float] = []
idx: list[pd.Timestamp] = []
blocker = candidate.blocker
for block_idx, block in block_frame.iterrows():
start_idx = int(block["block_start_index"])
end_idx = int(block["block_end_index"])
bar_block = rows.iloc[start_idx : end_idx + 1]
exposure_scale = 1.0
if block_idx >= blocker.train_min_blocks:
train_start = max(0, block_idx - blocker.lookback_blocks)
train = block_frame.iloc[train_start:block_idx]
train_x = train[features].to_numpy(dtype=float)
train_y = train["block_total"].to_numpy(dtype=float)
test_x = block[features].to_numpy(dtype=float)
pred = _ridge_predict(train_x, train_y, test_x, blocker.ridge_alpha)
if pred <= blocker.prediction_threshold:
exposure_scale = blocker.blocked_scale
if _guard_active(block, candidate):
exposure_scale = 1.0
for row in bar_block.itertuples(index=False):
returns.append(float(getattr(row, "portfolio_return")) * exposure_scale)
idx.append(pd.Timestamp(getattr(row, "timestamp")))
return pd.Series(returns, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
def _candidate_space() -> list[GuardedBlockerCandidate]:
candidates: list[GuardedBlockerCandidate] = []
for blocked_scale in (0.0, 0.25):
blocker = LearnedBlockerCandidate(
block_bars=42,
train_min_blocks=12,
lookback_blocks=24,
ridge_alpha=1.0,
prediction_threshold=-0.0025,
blocked_scale=blocked_scale,
)
for guard_regime_mode in ("momentum_only", "momentum_or_euphoric"):
for min_core_score in (0.55, 0.60, 0.65):
for min_breadth_persist in (0.45, 0.50, 0.55):
for min_funding_persist in (0.50, 0.55, 0.60):
for max_choppy_score in (0.35, 0.40):
for max_distribution_score in (0.25, 0.30):
candidates.append(
GuardedBlockerCandidate(
blocker=blocker,
guard_regime_mode=guard_regime_mode,
min_core_score=min_core_score,
min_breadth_persist=min_breadth_persist,
min_funding_persist=min_funding_persist,
max_choppy_score=max_choppy_score,
max_distribution_score=max_distribution_score,
max_panic_score=0.20,
)
)
return candidates
def main() -> None:
bundle, latest_bar = load_component_bundle(CACHE_PATH)
eval_start = latest_bar - pd.Timedelta(days=1825)
print("[phase] build current baseline", flush=True)
components = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
)
detail = _build_strategy_detail(components)
regime_columns = _build_regime_columns(detail)
block_frame = _build_block_dataset(detail, 42, regime_columns)
baseline_curve = _curve_from_returns(detail.set_index("timestamp")["portfolio_return"])
baseline_windows, baseline_years, baseline_score, *_ = _metrics_for_curve(baseline_curve, latest_bar)
top: list[dict[str, object]] = []
candidates = _candidate_space()
print(f"[phase] guarded blocker search {len(candidates)} candidates", flush=True)
for idx, candidate in enumerate(candidates, start=1):
sim_returns = _simulate_guarded_candidate(detail, block_frame, regime_columns, candidate)
curve = _curve_from_returns(sim_returns)
windows, years, score, negative_years, mdd_violations = _metrics_for_curve(curve, latest_bar)
payload = {
"candidate": {
**asdict(candidate.blocker),
"guard_regime_mode": candidate.guard_regime_mode,
"min_core_score": candidate.min_core_score,
"min_breadth_persist": candidate.min_breadth_persist,
"min_funding_persist": candidate.min_funding_persist,
"max_choppy_score": candidate.max_choppy_score,
"max_distribution_score": candidate.max_distribution_score,
"max_panic_score": candidate.max_panic_score,
},
"name": candidate.name,
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": windows,
"years": years,
}
top.append(payload)
top.sort(key=lambda item: float(item["score"]), reverse=True)
top = top[:10]
if idx % 50 == 0 or idx == len(candidates):
print(f"[search] {idx}/{len(candidates)}", flush=True)
output = {
"analysis": "current_cash_guarded_blocker",
"latest_bar": str(latest_bar),
"baseline": {
"score": baseline_score,
"windows": baseline_windows,
"years": baseline_years,
},
"top10": top,
}
OUT_JSON.write_text(json.dumps(output, indent=2), encoding="utf-8")
print(json.dumps(top[:5], indent=2))
print(f"[saved] {OUT_JSON}", flush=True)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,405 @@
from __future__ import annotations
import json
import sys
from dataclasses import asdict, dataclass
from pathlib import Path
import numpy as np
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.runtime import BEST_CASH_OVERLAY
from strategy32.research.soft_router import (
build_cash_overlay_period_components,
compose_cash_overlay_curve,
load_component_bundle,
score_candidate,
segment_metrics,
)
from strategy32.scripts.run_current_relaxed_hybrid_experiment import (
CACHE_PATH,
CURRENT_OVERHEAT_OVERRIDES,
WINDOWS,
YEAR_PERIODS,
YTD_START,
)
OUT_JSON = Path("/tmp/strategy32_current_cash_learned_blocker.json")
@dataclass(frozen=True, slots=True)
class LearnedBlockerCandidate:
block_bars: int
train_min_blocks: int
lookback_blocks: int
ridge_alpha: float
prediction_threshold: float
blocked_scale: float
@property
def name(self) -> str:
return (
f"block:{self.block_bars}"
f"|train:{self.train_min_blocks}"
f"|lookback:{self.lookback_blocks}"
f"|alpha:{self.ridge_alpha:.2f}"
f"|th:{self.prediction_threshold:.4f}"
f"|blocked:{self.blocked_scale:.2f}"
)
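# Build a per-bar detail frame: signal features are taken from the prior bar's
# score row, the realized portfolio return comes from the execution bar, and the
# core cash percentage is read at the signal timestamp.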
def _build_strategy_detail(components: dict[str, object]) -> pd.DataFrame:
timestamps = list(components["timestamps"])
score_map = components["score_frame"].set_index("timestamp").sort_index()
cash_map = components["core_exposure_frame"].set_index("timestamp")["cash_pct"].sort_index()
curve, detail = compose_cash_overlay_curve(
timestamps=timestamps,
score_frame=components["score_frame"],
core_returns=components["core_returns"],
core_exposure_frame=components["core_exposure_frame"],
cap_returns=components["cap_returns"],
chop_returns=components["chop_returns"],
dist_returns=components["dist_returns"],
candidate=BEST_CASH_OVERLAY,
)
_ = curve
rows: list[dict[str, object]] = []
detail_map = detail.set_index("timestamp").sort_index()
for i in range(1, len(timestamps)):
signal_ts = pd.Timestamp(timestamps[i - 1])
execution_ts = pd.Timestamp(timestamps[i])
score_row = score_map.loc[signal_ts].to_dict() if signal_ts in score_map.index else {}
detail_row = detail_map.loc[execution_ts].to_dict() if execution_ts in detail_map.index else {}
core_cash_pct = float(cash_map.get(signal_ts, cash_map.iloc[-1] if not cash_map.empty else 1.0))
rows.append(
{
"timestamp": execution_ts,
"strategic_regime": str(score_row.get("strategic_regime", "")),
"core_score": float(score_row.get("core_score", 0.0)),
"panic_score": float(score_row.get("panic_score", 0.0)),
"choppy_score": float(score_row.get("choppy_score", 0.0)),
"distribution_score": float(score_row.get("distribution_score", 0.0)),
"breadth_persist": float(score_row.get("breadth_persist", 0.0) or 0.0),
"funding_persist": float(score_row.get("funding_persist", 0.0) or 0.0),
"taker_persist": float(score_row.get("taker_persist", 0.0) or 0.0),
"volume_accel_persist": float(score_row.get("volume_accel_persist", 0.0) or 0.0),
"mean_taker_imbalance": float(score_row.get("mean_taker_imbalance", 0.0) or 0.0),
"taker_imbalance_dispersion": float(score_row.get("taker_imbalance_dispersion", 0.0) or 0.0),
"positive_taker_ratio": float(score_row.get("positive_taker_ratio", 0.0) or 0.0),
"mean_alt_volume_accel": float(score_row.get("mean_alt_volume_accel", 0.0) or 0.0),
"positive_volume_accel_ratio": float(score_row.get("positive_volume_accel_ratio", 0.0) or 0.0),
"funding_dispersion": float(score_row.get("funding_dispersion", 0.0) or 0.0),
"basis_dispersion": float(score_row.get("basis_dispersion", 0.0) or 0.0),
"alt_return_dispersion_7d": float(score_row.get("alt_return_dispersion_7d", 0.0) or 0.0),
"mean_funding_acceleration": float(score_row.get("mean_funding_acceleration", 0.0) or 0.0),
"mean_basis_trend": float(score_row.get("mean_basis_trend", 0.0) or 0.0),
"cash_pct": core_cash_pct,
"invested_pct": max(0.0, 1.0 - core_cash_pct),
"portfolio_return": float(detail_row.get("portfolio_return", 0.0)),
}
)
return pd.DataFrame(rows)
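# Compound per-bar returns into an equity curve seeded at 1000, with the initial
# point placed one 4-hour step before the first return timestamp; equity is
# floored at zero.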
def _curve_from_returns(returns: pd.Series) -> pd.Series:
equity = 1000.0
vals = [equity]
idx = [returns.index[0] - pd.Timedelta(hours=4)]
for ts, ret in returns.items():
equity *= max(0.0, 1.0 + float(ret))
idx.append(pd.Timestamp(ts))
vals.append(equity)
return pd.Series(vals, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
def _metrics_for_curve(curve: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
windows = {
label: segment_metrics(curve, latest_bar - pd.Timedelta(days=days), latest_bar)
for days, label in WINDOWS
}
years = {
label: segment_metrics(curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
for label, start, end_exclusive in YEAR_PERIODS
}
years["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)
score, negative_years, mdd_violations = score_candidate(
{label: windows[label] for _, label in WINDOWS},
{label: years[label] for label, _, _ in YEAR_PERIODS},
)
return windows, years, score, negative_years, mdd_violations
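# Closed-form ridge regression: standardize features with the training mean/std,
# clip features to +/-8 and the target to +/-0.5, add an unpenalized intercept
# column, then solve (X'X + alpha*I) beta = X'y, falling back to the
# pseudo-inverse when the system is singular.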
def _ridge_predict(train_x: np.ndarray, train_y: np.ndarray, test_x: np.ndarray, alpha: float) -> float:
if len(train_x) == 0:
return 0.0
train_x = np.nan_to_num(train_x, nan=0.0, posinf=0.0, neginf=0.0)
train_y = np.nan_to_num(train_y, nan=0.0, posinf=0.0, neginf=0.0)
test_x = np.nan_to_num(test_x, nan=0.0, posinf=0.0, neginf=0.0)
mean = train_x.mean(axis=0)
std = train_x.std(axis=0)
std[std < 1e-9] = 1.0
x_train = np.clip((train_x - mean) / std, -8.0, 8.0)
x_test = np.clip((test_x - mean) / std, -8.0, 8.0)
train_y = np.clip(train_y, -0.50, 0.50)
x_train = np.column_stack([np.ones(len(x_train)), x_train])
x_test = np.concatenate([[1.0], x_test])
penalty = np.eye(x_train.shape[1]) * alpha
penalty[0, 0] = 0.0
lhs = x_train.T @ x_train + penalty
rhs = x_train.T @ train_y
try:
beta = np.linalg.solve(lhs, rhs)
except np.linalg.LinAlgError:
beta = np.linalg.pinv(lhs) @ rhs
return float(x_test @ beta)
def _build_regime_columns(detail: pd.DataFrame) -> list[str]:
regime_dummies = pd.get_dummies(detail["strategic_regime"], prefix="regime", dtype=float)
for column in regime_dummies.columns:
detail[column] = regime_dummies[column]
return sorted(regime_dummies.columns.tolist())
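# Partition the detail frame into consecutive, non-overlapping blocks of
# block_bars bars. Each row records the features of the block's first
# ("trigger") bar plus trailing aggregates computed only from earlier bars
# (shift(1)), and the target block_total is the compounded portfolio return
# over the block.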
def _build_block_dataset(detail: pd.DataFrame, block_bars: int, regime_columns: list[str]) -> pd.DataFrame:
rows: list[dict[str, object]] = []
frame = detail.copy()
frame["trailing_total_21"] = frame["portfolio_return"].shift(1).rolling(21, min_periods=6).sum()
frame["trailing_total_42"] = frame["portfolio_return"].shift(1).rolling(42, min_periods=6).sum()
frame["trailing_core_score_21"] = frame["core_score"].shift(1).rolling(21, min_periods=6).mean()
frame["trailing_breadth_21"] = frame["breadth_persist"].shift(1).rolling(21, min_periods=6).mean()
frame["trailing_choppy_21"] = frame["choppy_score"].shift(1).rolling(21, min_periods=6).mean()
for start in range(0, len(frame), block_bars):
block = frame.iloc[start : start + block_bars]
if block.empty:
continue
trigger = block.iloc[0]
block_total = float((1.0 + block["portfolio_return"]).prod() - 1.0)
row = {
"timestamp": trigger["timestamp"],
"block_total": block_total,
"core_score": float(trigger["core_score"]),
"breadth_persist": float(trigger["breadth_persist"]),
"funding_persist": float(trigger["funding_persist"]),
"taker_persist": float(trigger["taker_persist"]),
"volume_accel_persist": float(trigger["volume_accel_persist"]),
"mean_taker_imbalance": float(trigger["mean_taker_imbalance"]),
"taker_imbalance_dispersion": float(trigger["taker_imbalance_dispersion"]),
"positive_taker_ratio": float(trigger["positive_taker_ratio"]),
"mean_alt_volume_accel": float(trigger["mean_alt_volume_accel"]),
"positive_volume_accel_ratio": float(trigger["positive_volume_accel_ratio"]),
"funding_dispersion": float(trigger["funding_dispersion"]),
"basis_dispersion": float(trigger["basis_dispersion"]),
"alt_return_dispersion_7d": float(trigger["alt_return_dispersion_7d"]),
"mean_funding_acceleration": float(trigger["mean_funding_acceleration"]),
"mean_basis_trend": float(trigger["mean_basis_trend"]),
"panic_score": float(trigger["panic_score"]),
"choppy_score": float(trigger["choppy_score"]),
"distribution_score": float(trigger["distribution_score"]),
"cash_pct": float(trigger["cash_pct"]),
"invested_pct": float(trigger["invested_pct"]),
"trailing_total_21": float(trigger["trailing_total_21"]) if pd.notna(trigger["trailing_total_21"]) else 0.0,
"trailing_total_42": float(trigger["trailing_total_42"]) if pd.notna(trigger["trailing_total_42"]) else 0.0,
"trailing_core_score_21": float(trigger["trailing_core_score_21"]) if pd.notna(trigger["trailing_core_score_21"]) else 0.0,
"trailing_breadth_21": float(trigger["trailing_breadth_21"]) if pd.notna(trigger["trailing_breadth_21"]) else 0.0,
"trailing_choppy_21": float(trigger["trailing_choppy_21"]) if pd.notna(trigger["trailing_choppy_21"]) else 0.0,
"block_start_index": int(start),
"block_end_index": int(block.index[-1]),
}
for column in regime_columns:
row[column] = float(trigger.get(column, 0.0))
rows.append(row)
return pd.DataFrame(rows)
def _feature_columns(regime_columns: list[str]) -> list[str]:
return [
"core_score",
"breadth_persist",
"funding_persist",
"taker_persist",
"volume_accel_persist",
"mean_taker_imbalance",
"taker_imbalance_dispersion",
"positive_taker_ratio",
"mean_alt_volume_accel",
"positive_volume_accel_ratio",
"funding_dispersion",
"basis_dispersion",
"alt_return_dispersion_7d",
"mean_funding_acceleration",
"mean_basis_trend",
"panic_score",
"choppy_score",
"distribution_score",
"cash_pct",
"invested_pct",
"trailing_total_21",
"trailing_total_42",
"trailing_core_score_21",
"trailing_breadth_21",
"trailing_choppy_21",
*regime_columns,
]
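# Walk-forward simulation: once train_min_blocks of history exist, fit the ridge
# model on up to lookback_blocks prior blocks and predict the current block's
# total return; if the prediction is at or below the threshold, every bar in the
# block is scaled to blocked_scale, otherwise exposure stays at 1.0.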
def _simulate_candidate(
detail: pd.DataFrame,
block_frame: pd.DataFrame,
regime_columns: list[str],
candidate: LearnedBlockerCandidate,
) -> pd.Series:
rows = detail.reset_index(drop=True)
features = _feature_columns(regime_columns)
returns: list[float] = []
idx: list[pd.Timestamp] = []
for block_idx, block in block_frame.iterrows():
start_idx = int(block["block_start_index"])
end_idx = int(block["block_end_index"])
bar_block = rows.iloc[start_idx : end_idx + 1]
exposure_scale = 1.0
if block_idx >= candidate.train_min_blocks:
train_start = max(0, block_idx - candidate.lookback_blocks)
train = block_frame.iloc[train_start:block_idx]
train_x = train[features].to_numpy(dtype=float)
train_y = train["block_total"].to_numpy(dtype=float)
test_x = block[features].to_numpy(dtype=float)
pred = _ridge_predict(train_x, train_y, test_x, candidate.ridge_alpha)
if pred <= candidate.prediction_threshold:
exposure_scale = candidate.blocked_scale
for row in bar_block.itertuples(index=False):
returns.append(float(getattr(row, "portfolio_return")) * exposure_scale)
idx.append(pd.Timestamp(getattr(row, "timestamp")))
return pd.Series(returns, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
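# Perfect-foresight reference: scale every block with a non-positive realized
# total to 0.0 and keep winning blocks at full exposure, giving an oracle
# ceiling for what a block-level blocker could achieve.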
def _oracle_blocker_curve(detail: pd.DataFrame, block_bars: int) -> pd.Series:
rows = detail.reset_index(drop=True)
returns: list[float] = []
idx: list[pd.Timestamp] = []
for start in range(0, len(rows), block_bars):
block = rows.iloc[start : start + block_bars]
if block.empty:
continue
total = float((1.0 + block["portfolio_return"]).prod() - 1.0)
scale = 1.0 if total > 0.0 else 0.0
for row in block.itertuples(index=False):
returns.append(float(getattr(row, "portfolio_return")) * scale)
idx.append(pd.Timestamp(getattr(row, "timestamp")))
return pd.Series(returns, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
def _candidate_space() -> list[LearnedBlockerCandidate]:
space: list[LearnedBlockerCandidate] = []
for block_bars in (21, 42, 84):
for train_min_blocks in (8, 12, 18):
for lookback_blocks in (24, 60):
if lookback_blocks < train_min_blocks:
continue
for ridge_alpha in (0.5, 1.0, 5.0, 20.0):
for prediction_threshold in (-0.0025, 0.0, 0.0025, 0.0050, 0.0100):
for blocked_scale in (0.0, 0.25, 0.50):
space.append(
LearnedBlockerCandidate(
block_bars=block_bars,
train_min_blocks=train_min_blocks,
lookback_blocks=lookback_blocks,
ridge_alpha=ridge_alpha,
prediction_threshold=prediction_threshold,
blocked_scale=blocked_scale,
)
)
return space
def main() -> None:
bundle, latest_bar = load_component_bundle(CACHE_PATH)
eval_start = latest_bar - pd.Timedelta(days=1825)
print("[phase] build current baseline", flush=True)
components = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
)
detail = _build_strategy_detail(components)
regime_columns = _build_regime_columns(detail)
baseline_curve = _curve_from_returns(detail.set_index("timestamp")["portfolio_return"])
baseline_windows, baseline_years, *_ = _metrics_for_curve(baseline_curve, latest_bar)
oracle_summary: dict[str, object] = {}
for block in (21, 42, 84):
oracle_curve = _curve_from_returns(_oracle_blocker_curve(detail, block))
windows, years, score, negative_years, mdd_violations = _metrics_for_curve(oracle_curve, latest_bar)
oracle_summary[f"oracle_block_{block}"] = {
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": windows,
"years": years,
}
candidates = _candidate_space()
print(f"[phase] blocker search {len(candidates)} candidates", flush=True)
best_payload: dict[str, object] | None = None
search_top: list[dict[str, object]] = []
for idx, candidate in enumerate(candidates, start=1):
block_frame = _build_block_dataset(detail, candidate.block_bars, regime_columns)
simulated_returns = _simulate_candidate(detail, block_frame, regime_columns, candidate)
curve = _curve_from_returns(simulated_returns)
windows, years, score, negative_years, mdd_violations = _metrics_for_curve(curve, latest_bar)
payload = {
"candidate": asdict(candidate),
"name": candidate.name,
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": windows,
"years": years,
}
search_top.append(payload)
search_top.sort(key=lambda item: float(item["score"]), reverse=True)
search_top = search_top[:5]
if best_payload is None or score > float(best_payload["score"]):
best_payload = payload
if idx % max(1, len(candidates) // 8) == 0:
print(f"[search] {idx}/{len(candidates)}", flush=True)
assert best_payload is not None
output = {
"analysis": "current_cash_learned_blocker",
"latest_bar": str(latest_bar),
**best_payload,
"baseline": {
"windows": baseline_windows,
"years": baseline_years,
},
"oracle": oracle_summary,
"search_top": search_top,
}
print(json.dumps(output, indent=2))
OUT_JSON.write_text(json.dumps(output, indent=2), encoding="utf-8")
print(f"[saved] {OUT_JSON}", flush=True)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,132 @@
from __future__ import annotations
import json
import multiprocessing as mp
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from dataclasses import asdict
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.research.soft_router import load_component_bundle, score_candidate, segment_metrics
from strategy32.scripts.run_current_cash_learned_blocker import (
CACHE_PATH,
CURRENT_OVERHEAT_OVERRIDES,
LearnedBlockerCandidate,
_build_block_dataset,
_build_regime_columns,
_build_strategy_detail,
_curve_from_returns,
_metrics_for_curve,
_simulate_candidate,
)
from strategy32.scripts.run_current_relaxed_hybrid_experiment import WINDOWS, YEAR_PERIODS, YTD_START
from strategy32.live.runtime import BEST_CASH_OVERLAY
from strategy32.research.soft_router import build_cash_overlay_period_components
OUT_JSON = Path("/tmp/strategy32_current_cash_learned_blocker_exact.json")
SOURCE_JSON = Path("/tmp/strategy32_current_cash_learned_blocker.json")
BASELINE_JSON = Path("/tmp/strategy32_live_combo_backtest.json")
def _load_best_candidate() -> LearnedBlockerCandidate:
payload = json.loads(SOURCE_JSON.read_text(encoding="utf-8"))
return LearnedBlockerCandidate(**payload["candidate"])
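# Worker run in a separate process: rebuild the overlay components for exactly
# this period, rerun the blocker simulation on that detail frame, and return
# segment metrics, so each window/year is measured from its own fresh backtest
# rather than sliced out of a single long curve.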
def _exact_period_worker(
cache_path: str,
candidate_payload: dict[str, object],
kind: str,
label: str,
start_text: str,
end_text: str,
) -> tuple[str, str, dict[str, float]]:
bundle, _ = load_component_bundle(cache_path)
candidate = LearnedBlockerCandidate(**candidate_payload)
eval_start = pd.Timestamp(start_text)
eval_end = pd.Timestamp(end_text)
components = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=eval_end,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
)
detail = _build_strategy_detail(components)
regime_columns = _build_regime_columns(detail)
block_frame = _build_block_dataset(detail, candidate.block_bars, regime_columns)
simulated_returns = _simulate_candidate(detail, block_frame, regime_columns, candidate)
curve = _curve_from_returns(simulated_returns)
return kind, label, segment_metrics(curve, eval_start, eval_end)
def main() -> None:
bundle, latest_bar = load_component_bundle(CACHE_PATH)
candidate = _load_best_candidate()
period_specs: list[tuple[str, str, pd.Timestamp, pd.Timestamp]] = []
for days, label in WINDOWS:
period_specs.append(("window", label, latest_bar - pd.Timedelta(days=days), latest_bar))
for label, start, end_exclusive in YEAR_PERIODS:
period_specs.append(("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))))
period_specs.append(("year", "2026_YTD", YTD_START, latest_bar))
ctx = mp.get_context("fork")
window_results: dict[str, dict[str, float]] = {}
year_results: dict[str, dict[str, float]] = {}
with ProcessPoolExecutor(max_workers=min(6, len(period_specs)), mp_context=ctx) as executor:
future_map = {
executor.submit(
_exact_period_worker,
CACHE_PATH,
candidate_payload=asdict(candidate),
kind=kind,
label=label,
start_text=str(start),
end_text=str(end),
): (kind, label)
for kind, label, start, end in period_specs
}
for future in as_completed(future_map):
kind, label, metrics = future.result()
if kind == "window":
window_results[label] = metrics
else:
year_results[label] = metrics
score, negative_years, mdd_violations = score_candidate(
{label: window_results[label] for _, label in WINDOWS},
{k: year_results[k] for k, _, _ in YEAR_PERIODS},
)
baseline = json.loads(BASELINE_JSON.read_text(encoding="utf-8")) if BASELINE_JSON.exists() else None
output = {
"analysis": "current_cash_learned_blocker_exact",
"latest_bar": str(latest_bar),
"candidate": asdict(candidate),
"name": candidate.name,
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": {label: window_results[label] for _, label in WINDOWS},
"years": year_results,
"baseline_exact": baseline,
}
OUT_JSON.write_text(json.dumps(output, indent=2), encoding="utf-8")
print(json.dumps(output, indent=2))
print(f"[saved] {OUT_JSON}")
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,101 @@
from __future__ import annotations
import json
import multiprocessing as mp
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from dataclasses import asdict
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.research.soft_router import load_component_bundle, score_candidate
from strategy32.scripts.run_current_cash_learned_blocker import CACHE_PATH, LearnedBlockerCandidate
from strategy32.scripts.run_current_cash_learned_blocker_exact import _exact_period_worker
from strategy32.scripts.run_current_relaxed_hybrid_experiment import WINDOWS, YEAR_PERIODS, YTD_START
OUT_JSON = Path("/tmp/strategy32_current_cash_blocker_exact_candidates.json")
BASELINE_JSON = Path("/tmp/strategy32_live_combo_backtest.json")
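# Shortlist of blocker configurations to re-score with the exact per-period
# worker, varying train_min_blocks, ridge alpha, and blocked_scale around the
# block_bars=42 setup.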
def _candidate_space() -> list[LearnedBlockerCandidate]:
return [
LearnedBlockerCandidate(42, 8, 24, 1.0, -0.0025, 0.0),
LearnedBlockerCandidate(42, 12, 24, 1.0, -0.0025, 0.0),
LearnedBlockerCandidate(42, 18, 24, 1.0, -0.0025, 0.0),
LearnedBlockerCandidate(42, 12, 24, 20.0, -0.0025, 0.0),
LearnedBlockerCandidate(42, 18, 24, 20.0, -0.0025, 0.0),
LearnedBlockerCandidate(42, 12, 24, 1.0, -0.0025, 0.25),
LearnedBlockerCandidate(42, 12, 24, 1.0, -0.0025, 0.50),
]
def main() -> None:
_, latest_bar = load_component_bundle(CACHE_PATH)
period_specs: list[tuple[str, str, pd.Timestamp, pd.Timestamp]] = []
for days, label in WINDOWS:
period_specs.append(("window", label, latest_bar - pd.Timedelta(days=days), latest_bar))
for label, start, end_exclusive in YEAR_PERIODS:
period_specs.append(("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))))
period_specs.append(("year", "2026_YTD", YTD_START, latest_bar))
ctx = mp.get_context("fork")
results: list[dict[str, object]] = []
candidates = _candidate_space()
for idx, candidate in enumerate(candidates, start=1):
print(f"[candidate] {idx}/{len(candidates)} {candidate.name}", flush=True)
window_results: dict[str, dict[str, float]] = {}
year_results: dict[str, dict[str, float]] = {}
with ProcessPoolExecutor(max_workers=min(6, len(period_specs)), mp_context=ctx) as executor:
future_map = {
executor.submit(
_exact_period_worker,
CACHE_PATH,
candidate_payload=asdict(candidate),
kind=kind,
label=label,
start_text=str(start),
end_text=str(end),
): (kind, label)
for kind, label, start, end in period_specs
}
for future in as_completed(future_map):
kind, label, metrics = future.result()
if kind == "window":
window_results[label] = metrics
else:
year_results[label] = metrics
score, negative_years, mdd_violations = score_candidate(
{label: window_results[label] for _, label in WINDOWS},
{k: year_results[k] for k, _, _ in YEAR_PERIODS},
)
results.append(
{
"candidate": asdict(candidate),
"name": candidate.name,
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": {label: window_results[label] for _, label in WINDOWS},
"years": year_results,
}
)
results.sort(key=lambda item: float(item["score"]), reverse=True)
output = {
"analysis": "current_cash_learned_blocker_exact_candidates",
"latest_bar": str(latest_bar),
"results": results,
"baseline_exact": json.loads(BASELINE_JSON.read_text(encoding="utf-8")) if BASELINE_JSON.exists() else None,
}
OUT_JSON.write_text(json.dumps(output, indent=2), encoding="utf-8")
print(json.dumps(output["results"], indent=2))
print(f"[saved] {OUT_JSON}", flush=True)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,146 @@
from __future__ import annotations
import json
import sys
from dataclasses import asdict
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.runtime import BEST_CASH_OVERLAY
from strategy32.research.soft_router import (
build_cash_overlay_period_components,
load_component_bundle,
score_candidate,
segment_metrics,
)
from strategy32.scripts.run_current_cash_learned_blocker import (
CACHE_PATH,
CURRENT_OVERHEAT_OVERRIDES,
LearnedBlockerCandidate,
_build_block_dataset,
_build_regime_columns,
_build_strategy_detail,
_curve_from_returns,
_simulate_candidate,
)
from strategy32.scripts.run_current_relaxed_hybrid_experiment import WINDOWS, YEAR_PERIODS, YTD_START
OUT_JSON = Path("/tmp/strategy32_current_cash_blocker_exact_plateau.json")
BASELINE_JSON = Path("/tmp/strategy32_live_combo_backtest.json")
def _candidate_space() -> list[LearnedBlockerCandidate]:
candidates: list[LearnedBlockerCandidate] = []
for block_bars in (21, 42):
for train_min_blocks in (8, 12, 18):
for ridge_alpha in (0.5, 1.0, 5.0, 20.0):
for prediction_threshold in (-0.0050, -0.0025, 0.0):
for blocked_scale in (0.0, 0.25, 0.50):
candidates.append(
LearnedBlockerCandidate(
block_bars=block_bars,
train_min_blocks=train_min_blocks,
lookback_blocks=24,
ridge_alpha=ridge_alpha,
prediction_threshold=prediction_threshold,
blocked_scale=blocked_scale,
)
)
return candidates
def main() -> None:
bundle, latest_bar = load_component_bundle(CACHE_PATH)
period_specs: list[tuple[str, str, pd.Timestamp, pd.Timestamp]] = []
for days, label in WINDOWS:
period_specs.append(("window", label, latest_bar - pd.Timedelta(days=days), latest_bar))
for label, start, end_exclusive in YEAR_PERIODS:
period_specs.append(("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))))
period_specs.append(("year", "2026_YTD", YTD_START, latest_bar))
period_cache: dict[str, dict[str, object]] = {}
for kind, label, start, end in period_specs:
components = build_cash_overlay_period_components(
bundle=bundle,
eval_start=start,
eval_end=end,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
)
detail = _build_strategy_detail(components)
regime_columns = _build_regime_columns(detail)
period_cache[label] = {
"kind": kind,
"start": start,
"end": end,
"detail": detail,
"regime_columns": regime_columns,
"block_frames": {},
}
print(f"[periods] cached {len(period_cache)} periods", flush=True)
candidates = _candidate_space()
print(f"[candidates] {len(candidates)} exact candidates", flush=True)
rows: list[dict[str, object]] = []
for idx, candidate in enumerate(candidates, start=1):
windows: dict[str, dict[str, float]] = {}
years: dict[str, dict[str, float]] = {}
for label, cached in period_cache.items():
detail = cached["detail"]
regime_columns = cached["regime_columns"]
block_frames = cached["block_frames"]
if candidate.block_bars not in block_frames:
block_frames[candidate.block_bars] = _build_block_dataset(detail, candidate.block_bars, regime_columns)
block_frame = block_frames[candidate.block_bars]
simulated_returns = _simulate_candidate(detail, block_frame, regime_columns, candidate)
curve = _curve_from_returns(simulated_returns)
metrics = segment_metrics(curve, cached["start"], cached["end"])
if cached["kind"] == "window":
windows[label] = metrics
else:
years[label] = metrics
score, negative_years, mdd_violations = score_candidate(
{label: windows[label] for _, label in WINDOWS},
{label: years[label] for label, _, _ in YEAR_PERIODS},
)
rows.append(
{
"candidate": asdict(candidate),
"name": candidate.name,
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": windows,
"years": years,
}
)
if idx % 24 == 0 or idx == len(candidates):
print(f"[progress] {idx}/{len(candidates)}", flush=True)
rows.sort(key=lambda item: float(item["score"]), reverse=True)
output = {
"analysis": "current_cash_blocker_exact_plateau",
"latest_bar": str(latest_bar),
"all_count": len(rows),
"top10": rows[:10],
"baseline_exact": json.loads(BASELINE_JSON.read_text(encoding="utf-8")) if BASELINE_JSON.exists() else None,
}
OUT_JSON.write_text(json.dumps(output, indent=2), encoding="utf-8")
print(json.dumps(output["top10"], indent=2))
print(f"[saved] {OUT_JSON}", flush=True)
if __name__ == "__main__":
main()
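
The nested loops in _candidate_space above enumerate 2 x 3 x 4 x 3 x 3 = 216 learned-blocker candidates; a hedged equivalent with itertools.product, shown only to make the grid size explicit:

# sketch: same grid as _candidate_space, built with itertools.product (216 combos)
from itertools import product

grid = list(product((21, 42), (8, 12, 18), (0.5, 1.0, 5.0, 20.0), (-0.0050, -0.0025, 0.0), (0.0, 0.25, 0.50)))
assert len(grid) == 2 * 3 * 4 * 3 * 3  # 216 candidates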

View File

@@ -0,0 +1,285 @@
from __future__ import annotations
import json
import math
import os
import sys
from dataclasses import asdict, dataclass
from pathlib import Path
import numpy as np
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.runtime import BEST_CASH_OVERLAY
from strategy32.research.soft_router import build_cash_overlay_period_components, load_component_bundle
from strategy32.scripts.run_current_cash_learned_blocker import (
CACHE_PATH,
CURRENT_OVERHEAT_OVERRIDES,
LearnedBlockerCandidate,
_build_block_dataset,
_build_regime_columns,
_build_strategy_detail,
_curve_from_returns,
_metrics_for_curve,
_simulate_candidate,
)
OUT_JSON = Path("/tmp/strategy32_current_cash_risk_overlay_search.json")
@dataclass(frozen=True, slots=True)
class RiskOverlayCandidate:
base_name: str
vol_lookback_bars: int
vol_target_mult: float
min_scale: float
max_scale: float
dd_lookback_bars: int
dd_cut: float
dd_scale: float
@property
def name(self) -> str:
return (
f"{self.base_name}"
f"|vol:{self.vol_lookback_bars}"
f"|vm:{self.vol_target_mult:.2f}"
f"|min:{self.min_scale:.2f}"
f"|max:{self.max_scale:.2f}"
f"|ddlb:{self.dd_lookback_bars}"
f"|ddcut:{self.dd_cut:.3f}"
f"|ddscale:{self.dd_scale:.2f}"
)
def _segment_score(windows: dict[str, dict[str, float]], years: dict[str, dict[str, float]]) -> tuple[float, int, int]:
negative_years = sum(1 for year in ("2021", "2022", "2023", "2024", "2025") if years[year]["total_return"] < 0.0)
mdd_violations = sum(1 for label in ("1y", "2y", "3y", "4y", "5y") if windows[label]["max_drawdown"] < -0.20)
score = 0.0
score += 4.0 * windows["1y"]["total_return"]
score += 2.0 * windows["2y"]["annualized_return"]
score += 1.5 * windows["3y"]["annualized_return"]
score += 2.5 * windows["5y"]["annualized_return"]
score += 0.75 * years["2025"]["total_return"]
score += 0.50 * years["2024"]["total_return"]
score += 0.20 * years["2026_YTD"]["total_return"]
score += 0.25 * max(0.0, -0.15 - windows["5y"]["max_drawdown"])
score -= 1.5 * negative_years
score -= 0.4 * mdd_violations
return score, negative_years, mdd_violations
def _compute_metrics(returns: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
curve = _curve_from_returns(returns)
windows, years, _, _, _ = _metrics_for_curve(curve, latest_bar)
score, negative_years, mdd_violations = _segment_score(windows, years)
return windows, years, score, negative_years, mdd_violations
def _safe_ratio(num: float, den: float, fallback: float) -> float:
if not math.isfinite(num) or not math.isfinite(den) or den <= 1e-12:
return fallback
return num / den
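# _apply_risk_overlay (defined next) scales each bar return by the smaller of two caps:
#   * volatility cap: anchor_vol * vol_target_mult / realized_vol, clipped to
#     [min_scale, max_scale] (falls back to max_scale while vol history is short)
#   * drawdown cap: dd_scale once equity sits more than dd_cut below its peak over
#     the last dd_lookback_bars bars, otherwise 1.0
# Taking min(vol_scale, dd_scale) means a drawdown de-risk is never undone by a
# calm-volatility boost on the same bar.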
def _apply_risk_overlay(returns: pd.Series, candidate: RiskOverlayCandidate) -> tuple[pd.Series, pd.DataFrame]:
idx = returns.index
realized_vol = returns.shift(1).rolling(candidate.vol_lookback_bars, min_periods=max(6, candidate.vol_lookback_bars // 3)).std()
anchor_vol = realized_vol.shift(1).expanding(min_periods=max(6, candidate.vol_lookback_bars // 2)).median()
scaled: list[float] = []
equities: list[float] = []
scales: list[float] = []
dd_series: list[float] = []
equity = 1000.0
history: list[float] = [equity]
for ts, base_ret in returns.items():
current_vol = float(realized_vol.get(ts, np.nan))
target_vol = float(anchor_vol.get(ts, np.nan))
vol_scale = _safe_ratio(target_vol * candidate.vol_target_mult, current_vol, candidate.max_scale)
vol_scale = min(candidate.max_scale, max(candidate.min_scale, vol_scale))
dd_lookback = max(2, candidate.dd_lookback_bars)
peak = max(history[-dd_lookback:])
current_dd = (equity / peak) - 1.0 if peak > 0.0 else 0.0
dd_scale = candidate.dd_scale if current_dd <= -candidate.dd_cut else 1.0
scale = min(vol_scale, dd_scale)
scaled_ret = float(base_ret) * scale
equity *= max(0.0, 1.0 + scaled_ret)
history.append(equity)
scaled.append(scaled_ret)
equities.append(equity)
scales.append(scale)
dd_series.append(current_dd)
frame = pd.DataFrame(
{
"timestamp": idx,
"base_return": returns.values,
"overlay_return": scaled,
"scale": scales,
"equity": equities,
"drawdown": dd_series,
"realized_vol": realized_vol.reindex(idx).values,
"anchor_vol": anchor_vol.reindex(idx).values,
}
)
return pd.Series(scaled, index=idx, dtype=float), frame
def _base_return_map(latest_bar: pd.Timestamp) -> tuple[dict[str, pd.Series], dict[str, object]]:
bundle, _ = load_component_bundle(CACHE_PATH)
eval_start = latest_bar - pd.Timedelta(days=1825)
components = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
)
detail = _build_strategy_detail(components)
regime_columns = _build_regime_columns(detail)
baseline_returns = detail.set_index("timestamp")["portfolio_return"].astype(float)
blocker_025 = LearnedBlockerCandidate(42, 12, 24, 1.0, -0.0025, 0.25)
blocker_050 = LearnedBlockerCandidate(42, 12, 24, 1.0, -0.0025, 0.50)
block_frame = _build_block_dataset(detail, blocker_025.block_bars, regime_columns)
blocker_025_returns = _simulate_candidate(detail, block_frame, regime_columns, blocker_025)
blocker_050_returns = _simulate_candidate(detail, block_frame, regime_columns, blocker_050)
return {
"baseline": baseline_returns,
"blocker_025": blocker_025_returns,
"blocker_050": blocker_050_returns,
}, {
"detail": detail,
"regime_columns": regime_columns,
}
def _candidate_space() -> list[RiskOverlayCandidate]:
space: list[RiskOverlayCandidate] = []
grid_mode = os.getenv("STRATEGY32_RISK_OVERLAY_GRID", "frontier").strip().lower()
if grid_mode == "wide":
base_names = ("baseline", "blocker_025", "blocker_050")
vol_lookbacks = (21, 42, 84)
vol_targets = (0.90, 1.00, 1.10)
min_scales = (0.50, 0.75)
max_scales = (1.00, 1.10)
dd_lookbacks = (42, 84)
dd_cuts = (0.08, 0.12, 0.16)
dd_scales = (0.25, 0.50, 0.75)
else:
base_names = ("baseline", "blocker_050")
vol_lookbacks = (42,)
vol_targets = (0.95, 1.00, 1.05)
min_scales = (0.50, 0.75)
max_scales = (1.00,)
dd_lookbacks = (42,)
dd_cuts = (0.08, 0.12)
dd_scales = (0.50, 0.75)
for base_name in base_names:
for vol_lookback_bars in vol_lookbacks:
for vol_target_mult in vol_targets:
for min_scale in min_scales:
for max_scale in max_scales:
if max_scale < min_scale:
continue
for dd_lookback_bars in dd_lookbacks:
for dd_cut in dd_cuts:
for dd_scale in dd_scales:
space.append(
RiskOverlayCandidate(
base_name=base_name,
vol_lookback_bars=vol_lookback_bars,
vol_target_mult=vol_target_mult,
min_scale=min_scale,
max_scale=max_scale,
dd_lookback_bars=dd_lookback_bars,
dd_cut=dd_cut,
dd_scale=dd_scale,
)
)
return space
def main() -> None:
bundle, latest_bar = load_component_bundle(CACHE_PATH)
base_returns_map, _ = _base_return_map(latest_bar)
baseline_windows, baseline_years, baseline_score, baseline_negative_years, baseline_mdd_violations = _compute_metrics(
base_returns_map["baseline"], latest_bar
)
blocker025_windows, blocker025_years, blocker025_score, blocker025_negative_years, blocker025_mdd_violations = _compute_metrics(
base_returns_map["blocker_025"], latest_bar
)
blocker050_windows, blocker050_years, blocker050_score, blocker050_negative_years, blocker050_mdd_violations = _compute_metrics(
base_returns_map["blocker_050"], latest_bar
)
space = _candidate_space()
top: list[dict[str, object]] = []
for idx, candidate in enumerate(space, start=1):
overlay_returns, overlay_frame = _apply_risk_overlay(base_returns_map[candidate.base_name], candidate)
windows, years, score, negative_years, mdd_violations = _compute_metrics(overlay_returns, latest_bar)
payload = {
"candidate": asdict(candidate),
"name": candidate.name,
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": windows,
"years": years,
"mean_scale": float(overlay_frame["scale"].mean()),
"min_scale": float(overlay_frame["scale"].min()),
"max_scale": float(overlay_frame["scale"].max()),
}
top.append(payload)
top.sort(key=lambda item: float(item["score"]), reverse=True)
top = top[:15]
if idx % 150 == 0 or idx == len(space):
print(f"[search] {idx}/{len(space)}", flush=True)
output = {
"analysis": "current_cash_risk_overlay_search",
"latest_bar": str(latest_bar),
"baseline": {
"score": baseline_score,
"negative_years": baseline_negative_years,
"mdd_violations": baseline_mdd_violations,
"windows": baseline_windows,
"years": baseline_years,
},
"blocker_025": {
"score": blocker025_score,
"negative_years": blocker025_negative_years,
"mdd_violations": blocker025_mdd_violations,
"windows": blocker025_windows,
"years": blocker025_years,
},
"blocker_050": {
"score": blocker050_score,
"negative_years": blocker050_negative_years,
"mdd_violations": blocker050_mdd_violations,
"windows": blocker050_windows,
"years": blocker050_years,
},
"top15": top,
}
OUT_JSON.write_text(json.dumps(output, indent=2), encoding="utf-8")
print(json.dumps(top[:5], indent=2))
print(f"[saved] {OUT_JSON}", flush=True)
if __name__ == "__main__":
main()
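
For scale, a hedged arithmetic sketch of the two grids defined in _candidate_space above; the factors are taken directly from the tuples, and the max_scale < min_scale guard never triggers for either value set:

# sketch: grid sizes implied by _candidate_space (no combinations are skipped)
frontier = 2 * 1 * 3 * 2 * 1 * 1 * 2 * 2   # base, vol_lb, vol_tgt, min, max, dd_lb, dd_cut, dd_scale
wide = 3 * 3 * 3 * 2 * 2 * 2 * 3 * 3
print(frontier, wide)  # 48 1944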

View File

@@ -0,0 +1,278 @@
from __future__ import annotations
import json
import sys
from dataclasses import asdict, dataclass
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.runtime import BEST_CASH_OVERLAY
from strategy32.research.soft_router import build_cash_overlay_period_components, load_component_bundle, score_candidate, segment_metrics
from strategy32.scripts.run_current_relaxed_hybrid_experiment import (
CACHE_PATH,
CURRENT_OVERHEAT_OVERRIDES,
RELAXED_OVERHEAT_OVERRIDES,
WINDOWS,
YEAR_PERIODS,
YTD_START,
_baseline_summary,
_overlay_weights,
)
OUT_JSON = Path("/tmp/strategy32_current_relaxed_block_router.json")
@dataclass(frozen=True, slots=True)
class BlockRouterCandidate:
positive_regimes: tuple[str, ...]
core_score_min: float
breadth_persist_min: float
funding_persist_min: float
panic_max: float
choppy_max: float
distribution_max: float
current_cash_min: float
block_bars: int
@property
def name(self) -> str:
regimes = ",".join(self.positive_regimes)
return (
f"regimes:{regimes}"
f"|core>={self.core_score_min:.2f}"
f"|breadth>={self.breadth_persist_min:.2f}"
f"|funding>={self.funding_persist_min:.2f}"
f"|panic<={self.panic_max:.2f}"
f"|choppy<={self.choppy_max:.2f}"
f"|dist<={self.distribution_max:.2f}"
f"|cash>={self.current_cash_min:.2f}"
f"|block:{self.block_bars}"
)
def _build_strategy_detail(components: dict[str, object]) -> pd.DataFrame:
timestamps = list(components["timestamps"])
score_map = components["score_frame"].set_index("timestamp").sort_index()
cash_map = components["core_exposure_frame"].set_index("timestamp")["cash_pct"].sort_index()
core_returns = components["core_returns"]
cap_returns = components["cap_returns"]
chop_returns = components["chop_returns"]
dist_returns = components["dist_returns"]
rows: list[dict[str, object]] = []
for i in range(1, len(timestamps)):
signal_ts = pd.Timestamp(timestamps[i - 1])
execution_ts = pd.Timestamp(timestamps[i])
score_row = score_map.loc[signal_ts].to_dict() if signal_ts in score_map.index else {}
core_cash_pct = float(cash_map.get(signal_ts, cash_map.iloc[-1] if not cash_map.empty else 1.0))
cap_weight, chop_weight, dist_weight = _overlay_weights(BEST_CASH_OVERLAY, score_row, core_cash_pct)
portfolio_return = (
float(core_returns.get(execution_ts, 0.0))
+ cap_weight * float(cap_returns.get(execution_ts, 0.0))
+ chop_weight * float(chop_returns.get(execution_ts, 0.0))
+ dist_weight * float(dist_returns.get(execution_ts, 0.0))
)
rows.append(
{
"timestamp": execution_ts,
"strategic_regime": str(score_row.get("strategic_regime", "")),
"core_score": float(score_row.get("core_score", 0.0)),
"panic_score": float(score_row.get("panic_score", 0.0)),
"choppy_score": float(score_row.get("choppy_score", 0.0)),
"distribution_score": float(score_row.get("distribution_score", 0.0)),
"breadth_persist": float(score_row.get("breadth_persist", 0.0) or 0.0),
"funding_persist": float(score_row.get("funding_persist", 0.0) or 0.0),
"core_cash_pct": core_cash_pct,
"portfolio_return": portfolio_return,
}
)
return pd.DataFrame(rows)
def _pick_relaxed(row: pd.Series, candidate: BlockRouterCandidate) -> bool:
return (
str(row.get("strategic_regime", "")) in candidate.positive_regimes
and float(row.get("core_score", 0.0)) >= candidate.core_score_min
and float(row.get("breadth_persist", 0.0)) >= candidate.breadth_persist_min
and float(row.get("funding_persist", 0.0)) >= candidate.funding_persist_min
and float(row.get("panic_score", 0.0)) <= candidate.panic_max
and float(row.get("choppy_score", 0.0)) <= candidate.choppy_max
and float(row.get("distribution_score", 0.0)) <= candidate.distribution_max
and float(row.get("current_cash_pct", 0.0)) >= candidate.current_cash_min
)
def _compose_block_returns(detail: pd.DataFrame, candidate: BlockRouterCandidate) -> pd.Series:
returns: list[float] = []
idx: list[pd.Timestamp] = []
rows = detail.reset_index(drop=True)
for start in range(0, len(rows), candidate.block_bars):
end = min(start + candidate.block_bars, len(rows))
block = rows.iloc[start:end]
trigger = block.iloc[0]
use_relaxed = _pick_relaxed(trigger, candidate)
source_col = "relaxed_return" if use_relaxed else "current_return"
returns.extend(block[source_col].tolist())
idx.extend(block["timestamp"].tolist())
return pd.Series(returns, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
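# _pick_relaxed and _compose_block_returns above implement the block router:
# only the first bar of each block_bars-long chunk (the trigger row) is evaluated,
# and the entire chunk then draws from relaxed_return or current_return, so the
# router can only switch variants on block boundaries, never bar by bar.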
def _curve_from_returns(returns: pd.Series) -> pd.Series:
equity = 1000.0
vals = [equity]
idx = [returns.index[0] - pd.Timedelta(hours=4)]
for ts, ret in returns.items():
equity *= max(0.0, 1.0 + float(ret))
idx.append(pd.Timestamp(ts))
vals.append(equity)
return pd.Series(vals, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
def _metrics_for_curve(curve: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
windows = {
label: segment_metrics(curve, latest_bar - pd.Timedelta(days=days), latest_bar)
for days, label in WINDOWS
}
years = {
label: segment_metrics(curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
for label, start, end_exclusive in YEAR_PERIODS
}
years["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)
score, negative_years, mdd_violations = score_candidate(
{label: windows[label] for _, label in WINDOWS},
{label: years[label] for label, _, _ in YEAR_PERIODS},
)
return windows, years, score, negative_years, mdd_violations
def _candidate_space() -> list[BlockRouterCandidate]:
space: list[BlockRouterCandidate] = []
positive_sets = (
("MOMENTUM_EXPANSION",),
("MOMENTUM_EXPANSION", "EUPHORIC_BREAKOUT"),
("CHOPPY_ROTATION", "MOMENTUM_EXPANSION"),
)
for positive_regimes in positive_sets:
for core_score_min in (0.50, 0.55, 0.60):
for breadth_persist_min in (0.45, 0.50, 0.55):
for funding_persist_min in (0.50, 0.55, 0.60):
for panic_max in (0.20, 0.30):
for choppy_max in (0.20, 0.30, 0.40):
for distribution_max in (0.20, 0.30, 0.40):
for current_cash_min in (0.50, 0.65, 0.80):
for block_bars in (42, 84, 180):
space.append(
BlockRouterCandidate(
positive_regimes=positive_regimes,
core_score_min=core_score_min,
breadth_persist_min=breadth_persist_min,
funding_persist_min=funding_persist_min,
panic_max=panic_max,
choppy_max=choppy_max,
distribution_max=distribution_max,
current_cash_min=current_cash_min,
block_bars=block_bars,
)
)
return space
def main() -> None:
bundle, latest_bar = load_component_bundle(CACHE_PATH)
eval_start = latest_bar - pd.Timedelta(days=1825)
print("[phase] build current", flush=True)
current = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
)
print("[phase] build relaxed", flush=True)
relaxed = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=RELAXED_OVERHEAT_OVERRIDES,
)
current_detail = _build_strategy_detail(current).rename(
columns={
"core_cash_pct": "current_cash_pct",
"portfolio_return": "current_return",
}
)
relaxed_detail = _build_strategy_detail(relaxed).rename(
columns={
"core_cash_pct": "relaxed_cash_pct",
"portfolio_return": "relaxed_return",
}
)
detail = current_detail.merge(
relaxed_detail[["timestamp", "relaxed_cash_pct", "relaxed_return"]],
on="timestamp",
how="inner",
)
rows: list[dict[str, object]] = []
candidates = _candidate_space()
print(f"[phase] search {len(candidates)} block-router candidates", flush=True)
for idx, candidate in enumerate(candidates, start=1):
returns = _compose_block_returns(detail, candidate)
curve = _curve_from_returns(returns)
windows, years, score, negative_years, mdd_violations = _metrics_for_curve(curve, latest_bar)
rows.append(
{
"candidate": asdict(candidate),
"name": candidate.name,
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": windows,
"years": years,
}
)
if idx % 96 == 0 or idx == len(candidates):
print(f"[search] {idx}/{len(candidates)}", flush=True)
rows.sort(key=lambda row: float(row["score"]), reverse=True)
best = BlockRouterCandidate(**rows[0]["candidate"])
best_returns = _compose_block_returns(detail, best)
best_curve = _curve_from_returns(best_returns)
windows, years, score, negative_years, mdd_violations = _metrics_for_curve(best_curve, latest_bar)
payload = {
"analysis": "current_relaxed_block_router",
"latest_bar": str(latest_bar),
"candidate": asdict(best),
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": windows,
"years": years,
"baselines": _baseline_summary(),
"search_top": rows[:10],
}
OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
print(json.dumps(payload, indent=2))
print(f"[saved] {OUT_JSON}", flush=True)
if __name__ == "__main__":
main()
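
A minimal standalone sketch of the compounding performed by _curve_from_returns above, assuming 4h bars and ignoring the clamp at zero equity (illustrative values only):

# sketch: compounding per-bar returns into an equity curve starting at 1000
import pandas as pd

returns = pd.Series([0.01, -0.005, 0.002],
                    index=pd.date_range("2026-01-01", periods=3, freq="4h"))
equity = 1000.0 * (1.0 + returns).cumprod()
print(round(float(equity.iloc[-1]), 2))  # 1006.96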

View File

@@ -0,0 +1,364 @@
from __future__ import annotations
import json
import multiprocessing as mp
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from dataclasses import asdict, dataclass
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.runtime import BEST_CASH_OVERLAY
from strategy32.research.soft_router import build_cash_overlay_period_components, load_component_bundle, score_candidate, segment_metrics
from strategy32.scripts.run_current_relaxed_hybrid_experiment import (
BASELINE_PATH,
CACHE_PATH,
CURRENT_OVERHEAT_OVERRIDES,
RELAXED_OVERHEAT_OVERRIDES,
WINDOWS,
YEAR_PERIODS,
YTD_START,
_baseline_summary,
_overlay_weights,
)
OUT_JSON = Path("/tmp/strategy32_current_relaxed_entry_overlay.json")
@dataclass(frozen=True, slots=True)
class EntryOverlayCandidate:
positive_regimes: tuple[str, ...]
core_score_min: float
breadth_persist_min: float
funding_persist_min: float
panic_max: float
choppy_max: float
distribution_max: float
current_cash_min: float
relaxed_invested_min: float
incremental_scale: float
@property
def name(self) -> str:
regimes = ",".join(self.positive_regimes)
return (
f"regimes:{regimes}"
f"|core>={self.core_score_min:.2f}"
f"|breadth>={self.breadth_persist_min:.2f}"
f"|funding>={self.funding_persist_min:.2f}"
f"|panic<={self.panic_max:.2f}"
f"|choppy<={self.choppy_max:.2f}"
f"|dist<={self.distribution_max:.2f}"
f"|cash>={self.current_cash_min:.2f}"
f"|relaxed>={self.relaxed_invested_min:.2f}"
f"|scale:{self.incremental_scale:.2f}"
)
def _pick_entry_overlay(score_row: dict[str, float], candidate: EntryOverlayCandidate) -> bool:
return (
str(score_row.get("strategic_regime")) in candidate.positive_regimes
and float(score_row.get("core_score", 0.0)) >= candidate.core_score_min
and float(score_row.get("breadth_persist", 0.0) or 0.0) >= candidate.breadth_persist_min
and float(score_row.get("funding_persist", 0.0) or 0.0) >= candidate.funding_persist_min
and float(score_row.get("panic_score", 0.0)) <= candidate.panic_max
and float(score_row.get("choppy_score", 0.0)) <= candidate.choppy_max
and float(score_row.get("distribution_score", 0.0)) <= candidate.distribution_max
)
def _compose_entry_overlay_curve(
*,
current_components: dict[str, object],
relaxed_components: dict[str, object],
candidate: EntryOverlayCandidate,
) -> tuple[pd.Series, pd.DataFrame]:
timestamps = list(current_components["timestamps"])
score_map = current_components["score_frame"].set_index("timestamp").sort_index()
current_cash_map = current_components["core_exposure_frame"].set_index("timestamp")["cash_pct"].sort_index()
relaxed_cash_map = relaxed_components["core_exposure_frame"].set_index("timestamp")["cash_pct"].sort_index()
current_core_returns = current_components["core_returns"]
relaxed_core_returns = relaxed_components["core_returns"]
cap_returns = current_components["cap_returns"]
chop_returns = current_components["chop_returns"]
dist_returns = current_components["dist_returns"]
equity = 1000.0
idx = [timestamps[0]]
vals = [equity]
rows: list[dict[str, object]] = []
for i in range(1, len(timestamps)):
signal_ts = pd.Timestamp(timestamps[i - 1])
execution_ts = pd.Timestamp(timestamps[i])
score_row = score_map.loc[signal_ts].to_dict() if signal_ts in score_map.index else {}
current_cash_pct = float(current_cash_map.get(signal_ts, current_cash_map.iloc[-1] if not current_cash_map.empty else 1.0))
current_invested = max(0.0, 1.0 - current_cash_pct)
relaxed_cash_pct = float(relaxed_cash_map.get(signal_ts, relaxed_cash_map.iloc[-1] if not relaxed_cash_map.empty else 1.0))
relaxed_invested = max(0.0, 1.0 - relaxed_cash_pct)
add_relaxed = 0.0
use_relaxed = False
if (
current_cash_pct >= candidate.current_cash_min
and relaxed_invested >= candidate.relaxed_invested_min
and _pick_entry_overlay(score_row, candidate)
):
use_relaxed = True
extra_invested = max(relaxed_invested - current_invested, 0.0)
if extra_invested > 0.0:
add_relaxed = min(current_cash_pct, extra_invested * candidate.incremental_scale)
relaxed_unit_return = 0.0
if add_relaxed > 0.0 and relaxed_invested > 1e-9:
relaxed_unit_return = float(relaxed_core_returns.get(execution_ts, 0.0)) / relaxed_invested
residual_cash = max(current_cash_pct - add_relaxed, 0.0)
cap_weight, chop_weight, dist_weight = _overlay_weights(BEST_CASH_OVERLAY, score_row, residual_cash)
bar_ret = (
float(current_core_returns.get(execution_ts, 0.0))
+ add_relaxed * relaxed_unit_return
+ cap_weight * float(cap_returns.get(execution_ts, 0.0))
+ chop_weight * float(chop_returns.get(execution_ts, 0.0))
+ dist_weight * float(dist_returns.get(execution_ts, 0.0))
)
equity *= max(0.0, 1.0 + bar_ret)
idx.append(execution_ts)
vals.append(equity)
rows.append(
{
"timestamp": execution_ts,
"used_relaxed_overlay": use_relaxed,
"current_cash_pct": current_cash_pct,
"relaxed_invested": relaxed_invested,
"added_relaxed_weight": add_relaxed,
"core_score": float(score_row.get("core_score", 0.0)),
"panic_score": float(score_row.get("panic_score", 0.0)),
"choppy_score": float(score_row.get("choppy_score", 0.0)),
"distribution_score": float(score_row.get("distribution_score", 0.0)),
"breadth_persist": float(score_row.get("breadth_persist", 0.0) or 0.0),
"funding_persist": float(score_row.get("funding_persist", 0.0) or 0.0),
"portfolio_return": bar_ret,
}
)
curve = pd.Series(vals, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
return curve, pd.DataFrame(rows)
def _metrics_for_curve(curve: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
windows: dict[str, dict[str, float]] = {}
for days, label in WINDOWS:
start = latest_bar - pd.Timedelta(days=days)
windows[label] = segment_metrics(curve, start, latest_bar)
years: dict[str, dict[str, float]] = {}
for label, start, end_exclusive in YEAR_PERIODS:
years[label] = segment_metrics(curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
years["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)
score, negative_years, mdd_violations = score_candidate(
{label: windows[label] for _, label in WINDOWS},
{label: years[label] for label, _, _ in YEAR_PERIODS},
)
return windows, years, score, negative_years, mdd_violations
def _candidate_space() -> list[EntryOverlayCandidate]:
space: list[EntryOverlayCandidate] = []
positive_sets = (
("EUPHORIC_BREAKOUT",),
("MOMENTUM_EXPANSION", "EUPHORIC_BREAKOUT"),
)
for positive_regimes in positive_sets:
for core_score_min in (0.50, 0.60):
for breadth_persist_min in (0.50, 0.55):
for funding_persist_min in (0.55, 0.60):
for panic_max in (0.20, 0.30):
for choppy_max in (0.40, 0.50):
for distribution_max in (0.30, 0.40):
for current_cash_min in (0.35, 0.50, 0.65):
for relaxed_invested_min in (0.15, 0.25, 0.35):
for incremental_scale in (0.50, 0.75, 1.00):
space.append(
EntryOverlayCandidate(
positive_regimes=positive_regimes,
core_score_min=core_score_min,
breadth_persist_min=breadth_persist_min,
funding_persist_min=funding_persist_min,
panic_max=panic_max,
choppy_max=choppy_max,
distribution_max=distribution_max,
current_cash_min=current_cash_min,
relaxed_invested_min=relaxed_invested_min,
incremental_scale=incremental_scale,
)
)
return space
def _period_worker(
cache_path: str,
candidate_payload: dict[str, object],
kind: str,
label: str,
start_text: str,
end_text: str,
) -> tuple[str, str, dict[str, float], list[dict[str, object]]]:
bundle, _ = load_component_bundle(cache_path)
candidate = EntryOverlayCandidate(**candidate_payload)
start = pd.Timestamp(start_text)
end = pd.Timestamp(end_text)
current = build_cash_overlay_period_components(
bundle=bundle,
eval_start=start,
eval_end=end,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
)
relaxed = build_cash_overlay_period_components(
bundle=bundle,
eval_start=start,
eval_end=end,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=RELAXED_OVERHEAT_OVERRIDES,
)
curve, rows = _compose_entry_overlay_curve(
current_components=current,
relaxed_components=relaxed,
candidate=candidate,
)
latest_rows: list[dict[str, object]] = []
if label == "2026_YTD":
latest_rows = rows.tail(5).assign(timestamp=lambda df: df["timestamp"].astype(str)).to_dict(orient="records")
return kind, label, segment_metrics(curve, start, end), latest_rows
def main() -> None:
bundle, latest_bar = load_component_bundle(CACHE_PATH)
eval_start = latest_bar - pd.Timedelta(days=1825)
print("[phase] build current components", flush=True)
current_components = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
)
print("[phase] build relaxed components", flush=True)
relaxed_components = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=RELAXED_OVERHEAT_OVERRIDES,
)
search_rows: list[dict[str, object]] = []
candidates = _candidate_space()
print("[phase] search entry-overlay candidates", flush=True)
for idx, candidate in enumerate(candidates, start=1):
curve, rows = _compose_entry_overlay_curve(
current_components=current_components,
relaxed_components=relaxed_components,
candidate=candidate,
)
windows, years, score, negative_years, mdd_violations = _metrics_for_curve(curve, latest_bar)
relaxed_share = float((rows["added_relaxed_weight"] > 0.0).mean()) if not rows.empty else 0.0
avg_added = float(rows["added_relaxed_weight"].mean()) if not rows.empty else 0.0
search_rows.append(
{
"candidate": asdict(candidate),
"name": candidate.name,
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"relaxed_entry_share": relaxed_share,
"avg_added_relaxed_weight": avg_added,
"windows": windows,
"years": years,
}
)
if idx % 96 == 0 or idx == len(candidates):
print(f"[search] {idx}/{len(candidates)}", flush=True)
search_rows.sort(key=lambda row: float(row["score"]), reverse=True)
best_candidate = EntryOverlayCandidate(**search_rows[0]["candidate"])
print(f"[phase] exact best {best_candidate.name}", flush=True)
window_results: dict[str, dict[str, float]] = {}
year_results: dict[str, dict[str, float]] = {}
latest_rows: list[dict[str, object]] = []
specs = [
*(("window", label, latest_bar - pd.Timedelta(days=days), latest_bar) for days, label in WINDOWS),
*(("year", label, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1))) for label, start, end_exclusive in YEAR_PERIODS),
("year", "2026_YTD", YTD_START, latest_bar),
]
ctx = mp.get_context("fork")
with ProcessPoolExecutor(max_workers=min(6, len(specs)), mp_context=ctx) as executor:
future_map = {
executor.submit(
_period_worker,
CACHE_PATH,
asdict(best_candidate),
kind,
label,
str(start),
str(end),
): (kind, label)
for kind, label, start, end in specs
}
for future in as_completed(future_map):
kind, label = future_map[future]
kind_result, label_result, metrics, latest = future.result()
if kind_result == "window":
window_results[label_result] = metrics
else:
year_results[label_result] = metrics
if latest:
latest_rows = latest
print(f"[done] {label_result}", flush=True)
score, negative_years, mdd_violations = score_candidate(
{label: window_results[label] for _, label in WINDOWS},
{label: year_results[label] for label, _, _ in YEAR_PERIODS},
)
payload = {
"analysis": "current_relaxed_entry_overlay",
"latest_bar": str(latest_bar),
"candidate": asdict(best_candidate),
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": {label: window_results[label] for _, label in WINDOWS},
"years": year_results,
"latest_rows": latest_rows,
"baselines": _baseline_summary(),
"search_top": search_rows[:5],
}
OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
print(json.dumps(payload, indent=2))
print(f"[saved] {OUT_JSON}")
if __name__ == "__main__":
main()
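
To make the sizing in _compose_entry_overlay_curve concrete, a small hedged arithmetic sketch with made-up numbers that mirrors the formulas above:

# sketch: entry-overlay sizing with illustrative numbers
current_cash_pct = 0.70          # current core is 30% invested
relaxed_invested = 0.60          # relaxed core is 60% invested
incremental_scale = 0.75
extra_invested = max(relaxed_invested - (1.0 - current_cash_pct), 0.0)   # 0.30
add_relaxed = min(current_cash_pct, extra_invested * incremental_scale)  # 0.225
relaxed_core_return = 0.012
relaxed_unit_return = relaxed_core_return / relaxed_invested             # 0.02 per unit invested
print(add_relaxed * relaxed_unit_return)  # 0.0045 added to the bar return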

View File

@@ -0,0 +1,428 @@
from __future__ import annotations
import json
import sys
from dataclasses import asdict, dataclass
from pathlib import Path
import numpy as np
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.runtime import BEST_CASH_OVERLAY
from strategy32.research.soft_router import build_cash_overlay_period_components, load_component_bundle, score_candidate, segment_metrics
from strategy32.scripts.run_current_relaxed_hybrid_experiment import (
CACHE_PATH,
CURRENT_OVERHEAT_OVERRIDES,
RELAXED_OVERHEAT_OVERRIDES,
WINDOWS,
YEAR_PERIODS,
YTD_START,
_baseline_summary,
_overlay_weights,
)
OUT_JSON = Path("/tmp/strategy32_current_relaxed_learned_entry_overlay.json")
@dataclass(frozen=True, slots=True)
class LearnedOverlayCandidate:
block_bars: int
train_min_blocks: int
lookback_blocks: int
ridge_alpha: float
prediction_threshold: float
overlay_scale: float
@property
def name(self) -> str:
return (
f"block:{self.block_bars}"
f"|train:{self.train_min_blocks}"
f"|lookback:{self.lookback_blocks}"
f"|alpha:{self.ridge_alpha:.2f}"
f"|th:{self.prediction_threshold:.4f}"
f"|scale:{self.overlay_scale:.2f}"
)
def _build_strategy_detail(components: dict[str, object]) -> pd.DataFrame:
timestamps = list(components["timestamps"])
score_map = components["score_frame"].set_index("timestamp").sort_index()
cash_map = components["core_exposure_frame"].set_index("timestamp")["cash_pct"].sort_index()
core_returns = components["core_returns"]
cap_returns = components["cap_returns"]
chop_returns = components["chop_returns"]
dist_returns = components["dist_returns"]
rows: list[dict[str, object]] = []
for i in range(1, len(timestamps)):
signal_ts = pd.Timestamp(timestamps[i - 1])
execution_ts = pd.Timestamp(timestamps[i])
score_row = score_map.loc[signal_ts].to_dict() if signal_ts in score_map.index else {}
core_cash_pct = float(cash_map.get(signal_ts, cash_map.iloc[-1] if not cash_map.empty else 1.0))
cap_weight, chop_weight, dist_weight = _overlay_weights(BEST_CASH_OVERLAY, score_row, core_cash_pct)
rows.append(
{
"timestamp": execution_ts,
"strategic_regime": str(score_row.get("strategic_regime", "")),
"core_score": float(score_row.get("core_score", 0.0)),
"panic_score": float(score_row.get("panic_score", 0.0)),
"choppy_score": float(score_row.get("choppy_score", 0.0)),
"distribution_score": float(score_row.get("distribution_score", 0.0)),
"breadth_persist": float(score_row.get("breadth_persist", 0.0) or 0.0),
"funding_persist": float(score_row.get("funding_persist", 0.0) or 0.0),
"taker_persist": float(score_row.get("taker_persist", 0.0) or 0.0),
"volume_accel_persist": float(score_row.get("volume_accel_persist", 0.0) or 0.0),
"mean_taker_imbalance": float(score_row.get("mean_taker_imbalance", 0.0) or 0.0),
"taker_imbalance_dispersion": float(score_row.get("taker_imbalance_dispersion", 0.0) or 0.0),
"positive_taker_ratio": float(score_row.get("positive_taker_ratio", 0.0) or 0.0),
"mean_alt_volume_accel": float(score_row.get("mean_alt_volume_accel", 0.0) or 0.0),
"positive_volume_accel_ratio": float(score_row.get("positive_volume_accel_ratio", 0.0) or 0.0),
"funding_dispersion": float(score_row.get("funding_dispersion", 0.0) or 0.0),
"basis_dispersion": float(score_row.get("basis_dispersion", 0.0) or 0.0),
"alt_return_dispersion_7d": float(score_row.get("alt_return_dispersion_7d", 0.0) or 0.0),
"mean_funding_acceleration": float(score_row.get("mean_funding_acceleration", 0.0) or 0.0),
"mean_basis_trend": float(score_row.get("mean_basis_trend", 0.0) or 0.0),
"cash_pct": core_cash_pct,
"invested_pct": max(0.0, 1.0 - core_cash_pct),
"cap_weight": cap_weight,
"chop_weight": chop_weight,
"dist_weight": dist_weight,
"portfolio_return": (
float(core_returns.get(execution_ts, 0.0))
+ cap_weight * float(cap_returns.get(execution_ts, 0.0))
+ chop_weight * float(chop_returns.get(execution_ts, 0.0))
+ dist_weight * float(dist_returns.get(execution_ts, 0.0))
),
}
)
return pd.DataFrame(rows)
def _curve_from_returns(returns: pd.Series) -> pd.Series:
equity = 1000.0
vals = [equity]
idx = [returns.index[0] - pd.Timedelta(hours=4)]
for ts, ret in returns.items():
equity *= max(0.0, 1.0 + float(ret))
idx.append(pd.Timestamp(ts))
vals.append(equity)
return pd.Series(vals, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
def _metrics_for_curve(curve: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
windows = {
label: segment_metrics(curve, latest_bar - pd.Timedelta(days=days), latest_bar)
for days, label in WINDOWS
}
years = {
label: segment_metrics(curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
for label, start, end_exclusive in YEAR_PERIODS
}
years["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)
score, negative_years, mdd_violations = score_candidate(
{label: windows[label] for _, label in WINDOWS},
{label: years[label] for label, _, _ in YEAR_PERIODS},
)
return windows, years, score, negative_years, mdd_violations
def _ridge_predict(train_x: np.ndarray, train_y: np.ndarray, test_x: np.ndarray, alpha: float) -> float:
if len(train_x) == 0:
return 0.0
train_x = np.nan_to_num(train_x, nan=0.0, posinf=0.0, neginf=0.0)
train_y = np.nan_to_num(train_y, nan=0.0, posinf=0.0, neginf=0.0)
test_x = np.nan_to_num(test_x, nan=0.0, posinf=0.0, neginf=0.0)
mean = train_x.mean(axis=0)
std = train_x.std(axis=0)
std[std < 1e-9] = 1.0
x_train = (train_x - mean) / std
x_test = (test_x - mean) / std
x_train = np.clip(x_train, -8.0, 8.0)
x_test = np.clip(x_test, -8.0, 8.0)
train_y = np.clip(train_y, -0.50, 0.50)
x_train = np.column_stack([np.ones(len(x_train)), x_train])
x_test = np.concatenate([[1.0], x_test])
penalty = np.eye(x_train.shape[1]) * alpha
penalty[0, 0] = 0.0
lhs = x_train.T @ x_train + penalty
rhs = x_train.T @ train_y
try:
beta = np.linalg.solve(lhs, rhs)
except np.linalg.LinAlgError:
beta = np.linalg.pinv(lhs) @ rhs
return float(x_test @ beta)
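# _ridge_predict note: this is a closed-form ridge regression. Training features are
# standardised with the training mean/std and clipped to +/-8, the target is clipped
# to +/-0.50, an intercept column is prepended and excluded from the penalty, and
# beta solves (X'X + alpha*I) beta = X'y, falling back to the pseudo-inverse if the
# system is singular. The return value is the prediction for the single test row.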
def _build_regime_columns(detail: pd.DataFrame) -> list[str]:
regime_dummies = pd.get_dummies(detail["strategic_regime"], prefix="regime", dtype=float)
for column in regime_dummies.columns:
detail[column] = regime_dummies[column]
return sorted(regime_dummies.columns.tolist())
def _build_block_dataset(detail: pd.DataFrame, block_bars: int, regime_columns: list[str]) -> pd.DataFrame:
rows: list[dict[str, object]] = []
frame = detail.copy()
frame["trailing_current_42"] = frame["current_return"].shift(1).rolling(42, min_periods=6).sum()
frame["trailing_relaxed_42"] = frame["relaxed_return"].shift(1).rolling(42, min_periods=6).sum()
frame["trailing_diff_42"] = frame["trailing_relaxed_42"] - frame["trailing_current_42"]
frame["trailing_core_score_21"] = frame["core_score"].shift(1).rolling(21, min_periods=6).mean()
frame["trailing_breadth_21"] = frame["breadth_persist"].shift(1).rolling(21, min_periods=6).mean()
frame["trailing_choppy_21"] = frame["choppy_score"].shift(1).rolling(21, min_periods=6).mean()
frame["extra_raw"] = np.minimum(frame["current_cash_pct"], np.maximum(frame["relaxed_invested_pct"] - frame["current_invested_pct"], 0.0))
relaxed_unit = np.where(frame["relaxed_invested_pct"] > 1e-9, frame["relaxed_return"] / frame["relaxed_invested_pct"], 0.0)
frame["overlay_add_return_full"] = frame["extra_raw"] * relaxed_unit
for start in range(0, len(frame), block_bars):
block = frame.iloc[start : start + block_bars]
if block.empty:
continue
trigger = block.iloc[0]
current_total = float((1.0 + block["current_return"]).prod() - 1.0)
relaxed_total = float((1.0 + block["relaxed_return"]).prod() - 1.0)
overlay_total = float((1.0 + (block["current_return"] + block["overlay_add_return_full"])).prod() / (1.0 + current_total) - 1.0)
row = {
"timestamp": trigger["timestamp"],
"current_total": current_total,
"relaxed_total": relaxed_total,
"overlay_total_full": overlay_total,
"current_cash_pct": float(trigger["current_cash_pct"]),
"relaxed_invested_pct": float(trigger["relaxed_invested_pct"]),
"core_score": float(trigger["core_score"]),
"breadth_persist": float(trigger["breadth_persist"]),
"funding_persist": float(trigger["funding_persist"]),
"taker_persist": float(trigger["taker_persist"]),
"volume_accel_persist": float(trigger["volume_accel_persist"]),
"mean_taker_imbalance": float(trigger["mean_taker_imbalance"]),
"taker_imbalance_dispersion": float(trigger["taker_imbalance_dispersion"]),
"positive_taker_ratio": float(trigger["positive_taker_ratio"]),
"mean_alt_volume_accel": float(trigger["mean_alt_volume_accel"]),
"positive_volume_accel_ratio": float(trigger["positive_volume_accel_ratio"]),
"funding_dispersion": float(trigger["funding_dispersion"]),
"basis_dispersion": float(trigger["basis_dispersion"]),
"alt_return_dispersion_7d": float(trigger["alt_return_dispersion_7d"]),
"mean_funding_acceleration": float(trigger["mean_funding_acceleration"]),
"mean_basis_trend": float(trigger["mean_basis_trend"]),
"panic_score": float(trigger["panic_score"]),
"choppy_score": float(trigger["choppy_score"]),
"distribution_score": float(trigger["distribution_score"]),
"trailing_current_42": float(trigger["trailing_current_42"]) if pd.notna(trigger["trailing_current_42"]) else 0.0,
"trailing_relaxed_42": float(trigger["trailing_relaxed_42"]) if pd.notna(trigger["trailing_relaxed_42"]) else 0.0,
"trailing_diff_42": float(trigger["trailing_diff_42"]) if pd.notna(trigger["trailing_diff_42"]) else 0.0,
"trailing_core_score_21": float(trigger["trailing_core_score_21"]) if pd.notna(trigger["trailing_core_score_21"]) else 0.0,
"trailing_breadth_21": float(trigger["trailing_breadth_21"]) if pd.notna(trigger["trailing_breadth_21"]) else 0.0,
"trailing_choppy_21": float(trigger["trailing_choppy_21"]) if pd.notna(trigger["trailing_choppy_21"]) else 0.0,
"block_start_index": int(start),
"block_end_index": int(block.index[-1]),
}
for column in regime_columns:
row[column] = float(trigger.get(column, 0.0))
rows.append(row)
return pd.DataFrame(rows)
def _feature_columns(regime_columns: list[str]) -> list[str]:
return [
"core_score",
"breadth_persist",
"funding_persist",
"taker_persist",
"volume_accel_persist",
"mean_taker_imbalance",
"taker_imbalance_dispersion",
"positive_taker_ratio",
"mean_alt_volume_accel",
"positive_volume_accel_ratio",
"funding_dispersion",
"basis_dispersion",
"alt_return_dispersion_7d",
"mean_funding_acceleration",
"mean_basis_trend",
"panic_score",
"choppy_score",
"distribution_score",
"current_cash_pct",
"relaxed_invested_pct",
"trailing_current_42",
"trailing_relaxed_42",
"trailing_diff_42",
"trailing_core_score_21",
"trailing_breadth_21",
"trailing_choppy_21",
*regime_columns,
]
def _simulate_candidate(
detail: pd.DataFrame,
block_frame: pd.DataFrame,
regime_columns: list[str],
candidate: LearnedOverlayCandidate,
) -> pd.Series:
rows = detail.reset_index(drop=True)
features = _feature_columns(regime_columns)
returns: list[float] = []
idx: list[pd.Timestamp] = []
for block_idx, block in block_frame.iterrows():
start_idx = int(block["block_start_index"])
end_idx = int(block["block_end_index"])
bar_block = rows.iloc[start_idx : end_idx + 1]
use_overlay = False
if block_idx >= candidate.train_min_blocks:
train_start = max(0, block_idx - candidate.lookback_blocks)
train = block_frame.iloc[train_start:block_idx]
train_x = train[features].to_numpy(dtype=float)
train_y = train["overlay_total_full"].to_numpy(dtype=float)
test_x = block[features].to_numpy(dtype=float)
pred = _ridge_predict(train_x, train_y, test_x, candidate.ridge_alpha)
use_overlay = pred > candidate.prediction_threshold
for row in bar_block.itertuples(index=False):
extra_add = 0.0
if use_overlay:
extra_add = float(getattr(row, "overlay_add_return_full")) * candidate.overlay_scale
returns.append(float(getattr(row, "current_return")) + extra_add)
idx.append(pd.Timestamp(getattr(row, "timestamp")))
return pd.Series(returns, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
def _candidate_space() -> list[LearnedOverlayCandidate]:
space: list[LearnedOverlayCandidate] = []
for block_bars in (42, 84):
for train_min_blocks in (12, 18, 24):
for lookback_blocks in (24, 60):
if lookback_blocks < train_min_blocks:
continue
for ridge_alpha in (0.5, 1.0, 5.0, 20.0):
for prediction_threshold in (0.0, 0.0010, 0.0025, 0.0050):
for overlay_scale in (0.25, 0.50, 0.75, 1.00):
space.append(
LearnedOverlayCandidate(
block_bars=block_bars,
train_min_blocks=train_min_blocks,
lookback_blocks=lookback_blocks,
ridge_alpha=ridge_alpha,
prediction_threshold=prediction_threshold,
overlay_scale=overlay_scale,
)
)
return space
def main() -> None:
bundle, latest_bar = load_component_bundle(CACHE_PATH)
eval_start = latest_bar - pd.Timedelta(days=1825)
print("[phase] build current", flush=True)
current = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
)
print("[phase] build relaxed", flush=True)
relaxed = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=RELAXED_OVERHEAT_OVERRIDES,
)
current_detail = _build_strategy_detail(current).rename(
columns={
"cash_pct": "current_cash_pct",
"invested_pct": "current_invested_pct",
"portfolio_return": "current_return",
}
)
relaxed_detail = _build_strategy_detail(relaxed).rename(
columns={
"cash_pct": "relaxed_cash_pct",
"invested_pct": "relaxed_invested_pct",
"portfolio_return": "relaxed_return",
}
)
detail = current_detail.merge(
relaxed_detail[
[
"timestamp",
"relaxed_cash_pct",
"relaxed_invested_pct",
"relaxed_return",
]
],
on="timestamp",
how="inner",
)
detail["extra_raw"] = np.minimum(detail["current_cash_pct"], np.maximum(detail["relaxed_invested_pct"] - detail["current_invested_pct"], 0.0))
relaxed_unit = np.where(detail["relaxed_invested_pct"] > 1e-9, detail["relaxed_return"] / detail["relaxed_invested_pct"], 0.0)
detail["overlay_add_return_full"] = detail["extra_raw"] * relaxed_unit
regime_columns = _build_regime_columns(detail)
candidates = _candidate_space()
rows: list[dict[str, object]] = []
print(f"[phase] learned search {len(candidates)} candidates", flush=True)
block_cache: dict[int, pd.DataFrame] = {}
for idx, candidate in enumerate(candidates, start=1):
block_frame = block_cache.get(candidate.block_bars)
if block_frame is None:
block_frame = _build_block_dataset(detail, candidate.block_bars, regime_columns)
block_cache[candidate.block_bars] = block_frame
returns = _simulate_candidate(detail, block_frame, regime_columns, candidate)
curve = _curve_from_returns(returns)
windows, years, score, negative_years, mdd_violations = _metrics_for_curve(curve, latest_bar)
rows.append(
{
"candidate": asdict(candidate),
"name": candidate.name,
"score": score,
"negative_years": negative_years,
"mdd_violations": mdd_violations,
"windows": windows,
"years": years,
}
)
if idx % 96 == 0 or idx == len(candidates):
print(f"[search] {idx}/{len(candidates)}", flush=True)
rows.sort(key=lambda row: float(row["score"]), reverse=True)
best = rows[0]
payload = {
"analysis": "current_relaxed_learned_entry_overlay",
"latest_bar": str(latest_bar),
"candidate": best["candidate"],
"score": best["score"],
"negative_years": best["negative_years"],
"mdd_violations": best["mdd_violations"],
"windows": best["windows"],
"years": best["years"],
"baselines": _baseline_summary(),
"search_top": rows[:10],
}
OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
print(json.dumps(payload, indent=2))
print(f"[saved] {OUT_JSON}", flush=True)
if __name__ == "__main__":
main()
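
The walk-forward gating in _simulate_candidate above only turns the overlay on once enough completed blocks exist to fit the ridge model; a minimal sketch of that split with hypothetical sizes (40 blocks, train_min_blocks=12, lookback_blocks=24):

# sketch: walk-forward block split used when deciding whether to apply the overlay
n_blocks, train_min_blocks, lookback_blocks = 40, 12, 24
for block_idx in range(n_blocks):
    if block_idx < train_min_blocks:
        continue  # too little history: the block keeps the plain current_return
    train_start = max(0, block_idx - lookback_blocks)
    train_slice = range(train_start, block_idx)  # blocks used to fit the ridge model
    # the fitted model then scores block block_idx; the overlay is added only when
    # the prediction exceeds prediction_threshold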

View File

@@ -0,0 +1,265 @@
from __future__ import annotations
import json
import sys
from pathlib import Path
import pandas as pd
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
if str(PACKAGE_PARENT) not in sys.path:
sys.path.insert(0, str(PACKAGE_PARENT))
from strategy32.live.runtime import BEST_CASH_OVERLAY
from strategy32.research.soft_router import build_cash_overlay_period_components, load_component_bundle, segment_metrics
from strategy32.scripts.run_current_relaxed_hybrid_experiment import (
CACHE_PATH,
CURRENT_OVERHEAT_OVERRIDES,
RELAXED_OVERHEAT_OVERRIDES,
WINDOWS,
YEAR_PERIODS,
YTD_START,
_overlay_weights,
)
OUT_JSON = Path("/tmp/strategy32_current_relaxed_oracle_analysis.json")
def _build_strategy_detail(components: dict[str, object]) -> pd.DataFrame:
timestamps = list(components["timestamps"])
score_map = components["score_frame"].set_index("timestamp").sort_index()
cash_map = components["core_exposure_frame"].set_index("timestamp")["cash_pct"].sort_index()
core_returns = components["core_returns"]
cap_returns = components["cap_returns"]
chop_returns = components["chop_returns"]
dist_returns = components["dist_returns"]
rows: list[dict[str, object]] = []
for i in range(1, len(timestamps)):
signal_ts = pd.Timestamp(timestamps[i - 1])
execution_ts = pd.Timestamp(timestamps[i])
score_row = score_map.loc[signal_ts].to_dict() if signal_ts in score_map.index else {}
core_cash_pct = float(cash_map.get(signal_ts, cash_map.iloc[-1] if not cash_map.empty else 1.0))
cap_weight, chop_weight, dist_weight = _overlay_weights(BEST_CASH_OVERLAY, score_row, core_cash_pct)
core_ret = float(core_returns.get(execution_ts, 0.0))
cap_ret = float(cap_returns.get(execution_ts, 0.0))
chop_ret = float(chop_returns.get(execution_ts, 0.0))
dist_ret = float(dist_returns.get(execution_ts, 0.0))
portfolio_return = (
core_ret
+ cap_weight * cap_ret
+ chop_weight * chop_ret
+ dist_weight * dist_ret
)
rows.append(
{
"timestamp": execution_ts,
"strategic_regime": str(score_row.get("strategic_regime", "")),
"core_score": float(score_row.get("core_score", 0.0)),
"panic_score": float(score_row.get("panic_score", 0.0)),
"choppy_score": float(score_row.get("choppy_score", 0.0)),
"distribution_score": float(score_row.get("distribution_score", 0.0)),
"breadth_persist": float(score_row.get("breadth_persist", 0.0) or 0.0),
"funding_persist": float(score_row.get("funding_persist", 0.0) or 0.0),
"core_cash_pct": core_cash_pct,
"cap_weight": cap_weight,
"chop_weight": chop_weight,
"dist_weight": dist_weight,
"portfolio_return": portfolio_return,
}
)
return pd.DataFrame(rows)
def _curve_from_returns(returns: pd.Series) -> pd.Series:
equity = 1000.0
vals = [equity]
idx = [returns.index[0] - pd.Timedelta(hours=4)]
for ts, ret in returns.items():
equity *= max(0.0, 1.0 + float(ret))
idx.append(pd.Timestamp(ts))
vals.append(equity)
return pd.Series(vals, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
def _window_metrics(curve: pd.Series, latest_bar: pd.Timestamp) -> dict[str, dict[str, float]]:
return {
label: segment_metrics(curve, latest_bar - pd.Timedelta(days=days), latest_bar)
for days, label in WINDOWS
}
def _year_metrics(curve: pd.Series, latest_bar: pd.Timestamp) -> dict[str, dict[str, float]]:
years = {
label: segment_metrics(curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
for label, start, end_exclusive in YEAR_PERIODS
}
years["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)
return years
def _regime_summary(detail: pd.DataFrame) -> list[dict[str, object]]:
rows: list[dict[str, object]] = []
for regime, chunk in detail.groupby("strategic_regime", dropna=False):
rows.append(
{
"strategic_regime": regime or "",
"bars": int(len(chunk)),
"share": float(len(chunk) / len(detail)) if len(detail) else 0.0,
"current_avg_return": float(chunk["current_return"].mean()),
"relaxed_avg_return": float(chunk["relaxed_return"].mean()),
"oracle_avg_return": float(chunk["oracle_bar_return"].mean()),
"relaxed_win_rate": float((chunk["relaxed_return"] > chunk["current_return"]).mean()),
"avg_diff_relaxed_minus_current": float((chunk["relaxed_return"] - chunk["current_return"]).mean()),
}
)
return sorted(rows, key=lambda row: row["share"], reverse=True)
def _winner_feature_summary(detail: pd.DataFrame, winner: str) -> dict[str, object]:
if winner == "relaxed":
mask = detail["relaxed_return"] > detail["current_return"]
else:
mask = detail["current_return"] >= detail["relaxed_return"]
chunk = detail.loc[mask].copy()
if chunk.empty:
return {"bars": 0}
return {
"bars": int(len(chunk)),
"share": float(len(chunk) / len(detail)) if len(detail) else 0.0,
"avg_core_score": float(chunk["core_score"].mean()),
"avg_breadth_persist": float(chunk["breadth_persist"].mean()),
"avg_funding_persist": float(chunk["funding_persist"].mean()),
"avg_panic_score": float(chunk["panic_score"].mean()),
"avg_choppy_score": float(chunk["choppy_score"].mean()),
"avg_distribution_score": float(chunk["distribution_score"].mean()),
"avg_current_cash_pct": float(chunk["current_cash_pct"].mean()),
"avg_relaxed_cash_pct": float(chunk["relaxed_cash_pct"].mean()),
"top_regimes": (
chunk["strategic_regime"].value_counts(normalize=True).head(5).rename_axis("regime").reset_index(name="share").to_dict(orient="records")
),
}
def _oracle_block_returns(detail: pd.DataFrame, block_bars: int) -> pd.Series:
rows: list[float] = []
idx: list[pd.Timestamp] = []
bar_count = len(detail)
for start in range(0, bar_count, block_bars):
end = min(start + block_bars, bar_count)
chunk = detail.iloc[start:end]
current_total = float((1.0 + chunk["current_return"]).prod() - 1.0)
relaxed_total = float((1.0 + chunk["relaxed_return"]).prod() - 1.0)
winner = "relaxed" if relaxed_total > current_total else "current"
source_col = "relaxed_return" if winner == "relaxed" else "current_return"
rows.extend(chunk[source_col].tolist())
idx.extend(chunk["timestamp"].tolist())
return pd.Series(rows, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
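# _oracle_block_returns note: for every block_bars-long chunk the current and relaxed
# returns are compounded separately and the better one is kept with hindsight, so the
# resulting "oracle" curves are upper bounds on what any real-time current/relaxed
# router over the same block length could achieve, not a tradable strategy.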
def main() -> None:
bundle, latest_bar = load_component_bundle(CACHE_PATH)
eval_start = latest_bar - pd.Timedelta(days=1825)
print("[phase] build current", flush=True)
current = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
)
print("[phase] build relaxed", flush=True)
relaxed = build_cash_overlay_period_components(
bundle=bundle,
eval_start=eval_start,
eval_end=latest_bar,
profile_name=BEST_CASH_OVERLAY.regime_profile,
core_filter=BEST_CASH_OVERLAY.core_filter,
cap_engine=BEST_CASH_OVERLAY.cap_engine,
chop_engine=BEST_CASH_OVERLAY.chop_engine,
dist_engine=BEST_CASH_OVERLAY.dist_engine,
core_config_overrides=RELAXED_OVERHEAT_OVERRIDES,
)
current_detail = _build_strategy_detail(current).rename(
columns={
"core_cash_pct": "current_cash_pct",
"portfolio_return": "current_return",
}
)
relaxed_detail = _build_strategy_detail(relaxed).rename(
columns={
"core_cash_pct": "relaxed_cash_pct",
"portfolio_return": "relaxed_return",
}
)
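    # Inner-join the two variants on timestamp so every bar carries both
    # returns, then record the per-bar oracle (best of the two) and the winner.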
merged = current_detail.merge(
relaxed_detail[
[
"timestamp",
"relaxed_cash_pct",
"relaxed_return",
]
],
on="timestamp",
how="inner",
)
merged["oracle_bar_return"] = merged[["current_return", "relaxed_return"]].max(axis=1)
merged["winner"] = merged.apply(
lambda row: "relaxed" if row["relaxed_return"] > row["current_return"] else "current",
axis=1,
)
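    # Compound per-bar returns into equity curves; the two block oracles use
    # 42-bar and 180-bar blocks, reported as oracle_7d and oracle_30d below.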
current_curve = _curve_from_returns(merged.set_index("timestamp")["current_return"])
relaxed_curve = _curve_from_returns(merged.set_index("timestamp")["relaxed_return"])
oracle_bar_curve = _curve_from_returns(merged.set_index("timestamp")["oracle_bar_return"])
oracle_7d_curve = _curve_from_returns(_oracle_block_returns(merged, block_bars=42))
oracle_30d_curve = _curve_from_returns(_oracle_block_returns(merged, block_bars=180))
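    # One JSON payload: window/year metrics for every curve, per-regime
    # comparison, winner feature summaries, and the last 10 aligned rows.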
payload = {
"analysis": "current_relaxed_oracle",
"latest_bar": str(latest_bar),
"bar_count": int(len(merged)),
"relaxed_win_rate": float((merged["winner"] == "relaxed").mean()),
"curves": {
"current": {
"windows": _window_metrics(current_curve, latest_bar),
"years": _year_metrics(current_curve, latest_bar),
},
"relaxed": {
"windows": _window_metrics(relaxed_curve, latest_bar),
"years": _year_metrics(relaxed_curve, latest_bar),
},
"oracle_bar": {
"windows": _window_metrics(oracle_bar_curve, latest_bar),
"years": _year_metrics(oracle_bar_curve, latest_bar),
},
"oracle_7d": {
"windows": _window_metrics(oracle_7d_curve, latest_bar),
"years": _year_metrics(oracle_7d_curve, latest_bar),
},
"oracle_30d": {
"windows": _window_metrics(oracle_30d_curve, latest_bar),
"years": _year_metrics(oracle_30d_curve, latest_bar),
},
},
"regime_summary": _regime_summary(merged),
"winner_feature_summary": {
"relaxed": _winner_feature_summary(merged, "relaxed"),
"current": _winner_feature_summary(merged, "current"),
},
"latest_rows": merged.tail(10).assign(timestamp=lambda df: df["timestamp"].astype(str)).to_dict(orient="records"),
}
OUT_JSON.write_text(json.dumps(payload, indent=2), encoding="utf-8")
print(json.dumps(payload, indent=2))
print(f"[saved] {OUT_JSON}", flush=True)
if __name__ == "__main__":
main()

View File

@@ -192,6 +192,16 @@ def build_strategic_regime_frame(
mean_funding: list[float] = []
positive_funding_ratio: list[float] = []
mean_basis: list[float] = []
+funding_dispersion: list[float] = []
+basis_dispersion: list[float] = []
+mean_taker_imbalance: list[float] = []
+taker_imbalance_dispersion: list[float] = []
+positive_taker_ratio: list[float] = []
+mean_volume_acceleration: list[float] = []
+positive_volume_accel_ratio: list[float] = []
+return_dispersion_7d: list[float] = []
+mean_funding_acceleration: list[float] = []
+mean_basis_trend: list[float] = []
btc_7d_return: list[float] = []
btc_30d_return: list[float] = []
@@ -200,6 +210,11 @@
funding_vals = []
basis_vals = []
positive_votes = []
+taker_imbalance_vals = []
+volume_accel_vals = []
+alt_returns_7d = []
+funding_acceleration_vals = []
+basis_trend_vals = []
for symbol, df in bundle.prices.items():
if symbol == BTC_SYMBOL:
continue
@@ -207,6 +222,18 @@
if len(hist) >= 10:
ema = hist["close"].ewm(span=20, adjust=False).mean().iloc[-1]
votes.append(float(hist["close"].iloc[-1] > ema))
+fast_vol = float(hist["volume"].tail(6).mean())
+slow_vol = float(hist["volume"].mean())
+if slow_vol > 0.0:
+    volume_accel_vals.append(fast_vol / slow_vol - 1.0)
+if {"quote_volume", "taker_quote"} <= set(hist.columns):
+    recent_quote_volume = float(hist["quote_volume"].tail(6).sum())
+    recent_taker_quote = float(hist["taker_quote"].tail(6).sum())
+    if recent_quote_volume > 0.0:
+        taker_imbalance_vals.append((recent_taker_quote / recent_quote_volume) * 2.0 - 1.0)
+ret_hist = df.loc[df["timestamp"] <= ts].tail(42)
+if len(ret_hist) >= 42:
+    alt_returns_7d.append(float(ret_hist["close"].iloc[-1] / ret_hist["close"].iloc[0] - 1.0))
f_df = bundle.funding.get(symbol)
if f_df is None:
continue
@@ -216,9 +243,22 @@
funding_vals.append(float(f_hist["funding_rate"].mean()))
basis_vals.append(float(f_hist["basis"].iloc[-1]))
positive_votes.append(float((f_hist["funding_rate"] > 0).mean()))
+if len(f_hist) >= 3:
+    funding_acceleration_vals.append(float(f_hist["funding_rate"].tail(3).mean() - f_hist["funding_rate"].mean()))
+    basis_trend_vals.append(float(f_hist["basis"].tail(3).mean() - f_hist["basis"].mean()))
breadths.append(sum(votes) / len(votes) if votes else 0.5)
mean_funding.append(sum(funding_vals) / len(funding_vals) if funding_vals else 0.0)
mean_basis.append(sum(basis_vals) / len(basis_vals) if basis_vals else 0.0)
+funding_dispersion.append(float(pd.Series(funding_vals).std(ddof=0)) if len(funding_vals) >= 2 else 0.0)
+basis_dispersion.append(float(pd.Series(basis_vals).std(ddof=0)) if len(basis_vals) >= 2 else 0.0)
+mean_taker_imbalance.append(sum(taker_imbalance_vals) / len(taker_imbalance_vals) if taker_imbalance_vals else 0.0)
+taker_imbalance_dispersion.append(float(pd.Series(taker_imbalance_vals).std(ddof=0)) if len(taker_imbalance_vals) >= 2 else 0.0)
+positive_taker_ratio.append(float(sum(val > 0.10 for val in taker_imbalance_vals) / len(taker_imbalance_vals)) if taker_imbalance_vals else 0.0)
+mean_volume_acceleration.append(sum(volume_accel_vals) / len(volume_accel_vals) if volume_accel_vals else 0.0)
+positive_volume_accel_ratio.append(float(sum(val > 0.20 for val in volume_accel_vals) / len(volume_accel_vals)) if volume_accel_vals else 0.0)
+return_dispersion_7d.append(float(pd.Series(alt_returns_7d).std(ddof=0)) if len(alt_returns_7d) >= 2 else 0.0)
+mean_funding_acceleration.append(sum(funding_acceleration_vals) / len(funding_acceleration_vals) if funding_acceleration_vals else 0.0)
+mean_basis_trend.append(sum(basis_trend_vals) / len(basis_trend_vals) if basis_trend_vals else 0.0)
positive_funding_ratio.append(sum(positive_votes) / len(positive_votes) if positive_votes else 0.5)
if idx >= 42:
@@ -233,6 +273,16 @@
prepared["breadth"] = breadths
prepared["mean_alt_funding"] = mean_funding
prepared["mean_alt_basis"] = mean_basis
+prepared["funding_dispersion"] = funding_dispersion
+prepared["basis_dispersion"] = basis_dispersion
+prepared["mean_taker_imbalance"] = mean_taker_imbalance
+prepared["taker_imbalance_dispersion"] = taker_imbalance_dispersion
+prepared["positive_taker_ratio"] = positive_taker_ratio
+prepared["mean_alt_volume_accel"] = mean_volume_acceleration
+prepared["positive_volume_accel_ratio"] = positive_volume_accel_ratio
+prepared["alt_return_dispersion_7d"] = return_dispersion_7d
+prepared["mean_funding_acceleration"] = mean_funding_acceleration
+prepared["mean_basis_trend"] = mean_basis_trend
prepared["positive_funding_ratio"] = positive_funding_ratio
prepared["btc_7d_return"] = btc_7d_return
prepared["btc_30d_return"] = btc_30d_return
@@ -240,6 +290,8 @@
prepared["intraday_trend_gap"] = prepared["close"] / prepared["ema_slow"] - 1.0
prepared["breadth_persist"] = prepared["breadth"].rolling(18, min_periods=6).mean()
prepared["funding_persist"] = prepared["positive_funding_ratio"].rolling(18, min_periods=6).mean()
+prepared["taker_persist"] = prepared["positive_taker_ratio"].rolling(18, min_periods=6).mean()
+prepared["volume_accel_persist"] = prepared["positive_volume_accel_ratio"].rolling(18, min_periods=6).mean()
regimes: list[str] = []
for row in prepared.itertuples(index=False):

View File

@@ -36,7 +36,7 @@ from strategy32.research.soft_router import (
compose_cash_overlay_curve,
compose_soft_router_curve,
)
-from strategy32.signal.router import Strategy32Router
+from strategy32.routing.router import Strategy32Router
from strategy32.universe import filter_momentum_frame, limit_correlated_symbols, rank_momentum_universe, score_momentum_universe, select_dynamic_universe, select_strategic_universe
from strategy32.universe import filter_momentum_frame, limit_correlated_symbols, rank_momentum_universe, score_momentum_universe, select_dynamic_universe, select_strategic_universe from strategy32.universe import filter_momentum_frame, limit_correlated_symbols, rank_momentum_universe, score_momentum_universe, select_dynamic_universe, select_strategic_universe