# strategy32/scripts/run_current_cash_learned_blocker.py
from __future__ import annotations
import json
import sys
from dataclasses import asdict, dataclass
from pathlib import Path
import numpy as np
import pandas as pd
# Make the directory two levels above this file importable so the
# `strategy32.*` absolute imports below resolve when this file is run
# directly as a script.
PACKAGE_PARENT = Path(__file__).resolve().parents[2]
_package_parent_str = str(PACKAGE_PARENT)
if _package_parent_str not in sys.path:
    sys.path.insert(0, _package_parent_str)
from strategy32.live.runtime import BEST_CASH_OVERLAY
from strategy32.research.soft_router import (
build_cash_overlay_period_components,
compose_cash_overlay_curve,
load_component_bundle,
score_candidate,
segment_metrics,
)
from strategy32.scripts.run_current_relaxed_hybrid_experiment import (
CACHE_PATH,
CURRENT_OVERHEAT_OVERRIDES,
WINDOWS,
YEAR_PERIODS,
YTD_START,
)
# Destination for the JSON report written at the end of main().
OUT_JSON = Path("/tmp/strategy32_current_cash_learned_blocker.json")
@dataclass(frozen=True, slots=True)
class LearnedBlockerCandidate:
block_bars: int
train_min_blocks: int
lookback_blocks: int
ridge_alpha: float
prediction_threshold: float
blocked_scale: float
@property
def name(self) -> str:
return (
f"block:{self.block_bars}"
f"|train:{self.train_min_blocks}"
f"|lookback:{self.lookback_blocks}"
f"|alpha:{self.ridge_alpha:.2f}"
f"|th:{self.prediction_threshold:.4f}"
f"|blocked:{self.blocked_scale:.2f}"
)
def _build_strategy_detail(components: dict[str, object]) -> pd.DataFrame:
    """Build a per-bar detail frame for the current cash-overlay strategy.

    Reads the "timestamps", "score_frame", "core_exposure_frame",
    "core_returns", "cap_returns", "chop_returns" and "dist_returns" entries
    of ``components`` (presumably as produced by
    build_cash_overlay_period_components — confirm against caller). Each
    output row pairs an execution bar with the score/exposure snapshot of
    the preceding signal bar plus the realized portfolio return.
    """
    timestamps = list(components["timestamps"])
    score_map = components["score_frame"].set_index("timestamp").sort_index()
    cash_map = components["core_exposure_frame"].set_index("timestamp")["cash_pct"].sort_index()
    _, detail = compose_cash_overlay_curve(
        timestamps=timestamps,
        score_frame=components["score_frame"],
        core_returns=components["core_returns"],
        core_exposure_frame=components["core_exposure_frame"],
        cap_returns=components["cap_returns"],
        chop_returns=components["chop_returns"],
        dist_returns=components["dist_returns"],
        candidate=BEST_CASH_OVERLAY,
    )
    detail_map = detail.set_index("timestamp").sort_index()
    # Scores copied with a plain 0.0 default.
    plain_score_keys = ("core_score", "panic_score", "choppy_score", "distribution_score")
    # Scores where a falsy value (e.g. None) is also coerced to 0.0.
    nullable_score_keys = (
        "breadth_persist", "funding_persist", "taker_persist", "volume_accel_persist",
        "mean_taker_imbalance", "taker_imbalance_dispersion", "positive_taker_ratio",
        "mean_alt_volume_accel", "positive_volume_accel_ratio", "funding_dispersion",
        "basis_dispersion", "alt_return_dispersion_7d", "mean_funding_acceleration",
        "mean_basis_trend",
    )
    records: list[dict[str, object]] = []
    for pos in range(1, len(timestamps)):
        # Signals are read one bar before the bar they are executed on.
        signal_ts = pd.Timestamp(timestamps[pos - 1])
        execution_ts = pd.Timestamp(timestamps[pos])
        score_row = score_map.loc[signal_ts].to_dict() if signal_ts in score_map.index else {}
        detail_row = detail_map.loc[execution_ts].to_dict() if execution_ts in detail_map.index else {}
        # Fall back to the last known cash level (or fully-cash) when the
        # signal bar is missing from the exposure frame.
        core_cash_pct = float(cash_map.get(signal_ts, cash_map.iloc[-1] if not cash_map.empty else 1.0))
        record: dict[str, object] = {
            "timestamp": execution_ts,
            "strategic_regime": str(score_row.get("strategic_regime", "")),
        }
        for key in plain_score_keys:
            record[key] = float(score_row.get(key, 0.0))
        for key in nullable_score_keys:
            record[key] = float(score_row.get(key, 0.0) or 0.0)
        record["cash_pct"] = core_cash_pct
        record["invested_pct"] = max(0.0, 1.0 - core_cash_pct)
        record["portfolio_return"] = float(detail_row.get("portfolio_return", 0.0))
        records.append(record)
    return pd.DataFrame(records)
def _curve_from_returns(returns: pd.Series) -> pd.Series:
equity = 1000.0
vals = [equity]
idx = [returns.index[0] - pd.Timedelta(hours=4)]
for ts, ret in returns.items():
equity *= max(0.0, 1.0 + float(ret))
idx.append(pd.Timestamp(ts))
vals.append(equity)
return pd.Series(vals, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
def _metrics_for_curve(curve: pd.Series, latest_bar: pd.Timestamp) -> tuple[dict[str, dict[str, float]], dict[str, dict[str, float]], float, int, int]:
    """Score an equity curve over the trailing windows and calendar years.

    Returns (windows, years, score, negative_years, mdd_violations), where
    the score triple comes from score_candidate() evaluated on the WINDOWS
    and YEAR_PERIODS segments only (the extra "2026_YTD" entry is reported
    in ``years`` but excluded from scoring).
    """
    windows: dict[str, dict[str, float]] = {}
    for days, label in WINDOWS:
        windows[label] = segment_metrics(curve, latest_bar - pd.Timedelta(days=days), latest_bar)
    years: dict[str, dict[str, float]] = {}
    for label, start, end_exclusive in YEAR_PERIODS:
        # Clamp each year to the data we actually have; end is exclusive.
        years[label] = segment_metrics(curve, start, min(latest_bar, end_exclusive - pd.Timedelta(seconds=1)))
    years["2026_YTD"] = segment_metrics(curve, YTD_START, latest_bar)
    window_subset = {label: windows[label] for _, label in WINDOWS}
    year_subset = {label: years[label] for label, _, _ in YEAR_PERIODS}
    score, negative_years, mdd_violations = score_candidate(window_subset, year_subset)
    return windows, years, score, negative_years, mdd_violations
def _ridge_predict(train_x: np.ndarray, train_y: np.ndarray, test_x: np.ndarray, alpha: float) -> float:
if len(train_x) == 0:
return 0.0
train_x = np.nan_to_num(train_x, nan=0.0, posinf=0.0, neginf=0.0)
train_y = np.nan_to_num(train_y, nan=0.0, posinf=0.0, neginf=0.0)
test_x = np.nan_to_num(test_x, nan=0.0, posinf=0.0, neginf=0.0)
mean = train_x.mean(axis=0)
std = train_x.std(axis=0)
std[std < 1e-9] = 1.0
x_train = np.clip((train_x - mean) / std, -8.0, 8.0)
x_test = np.clip((test_x - mean) / std, -8.0, 8.0)
train_y = np.clip(train_y, -0.50, 0.50)
x_train = np.column_stack([np.ones(len(x_train)), x_train])
x_test = np.concatenate([[1.0], x_test])
penalty = np.eye(x_train.shape[1]) * alpha
penalty[0, 0] = 0.0
lhs = x_train.T @ x_train + penalty
rhs = x_train.T @ train_y
try:
beta = np.linalg.solve(lhs, rhs)
except np.linalg.LinAlgError:
beta = np.linalg.pinv(lhs) @ rhs
return float(x_test @ beta)
def _build_regime_columns(detail: pd.DataFrame) -> list[str]:
regime_dummies = pd.get_dummies(detail["strategic_regime"], prefix="regime", dtype=float)
for column in regime_dummies.columns:
detail[column] = regime_dummies[column]
return sorted(regime_dummies.columns.tolist())
def _build_block_dataset(detail: pd.DataFrame, block_bars: int, regime_columns: list[str]) -> pd.DataFrame:
rows: list[dict[str, object]] = []
frame = detail.copy()
frame["trailing_total_21"] = frame["portfolio_return"].shift(1).rolling(21, min_periods=6).sum()
frame["trailing_total_42"] = frame["portfolio_return"].shift(1).rolling(42, min_periods=6).sum()
frame["trailing_core_score_21"] = frame["core_score"].shift(1).rolling(21, min_periods=6).mean()
frame["trailing_breadth_21"] = frame["breadth_persist"].shift(1).rolling(21, min_periods=6).mean()
frame["trailing_choppy_21"] = frame["choppy_score"].shift(1).rolling(21, min_periods=6).mean()
for start in range(0, len(frame), block_bars):
block = frame.iloc[start : start + block_bars]
if block.empty:
continue
trigger = block.iloc[0]
block_total = float((1.0 + block["portfolio_return"]).prod() - 1.0)
row = {
"timestamp": trigger["timestamp"],
"block_total": block_total,
"core_score": float(trigger["core_score"]),
"breadth_persist": float(trigger["breadth_persist"]),
"funding_persist": float(trigger["funding_persist"]),
"taker_persist": float(trigger["taker_persist"]),
"volume_accel_persist": float(trigger["volume_accel_persist"]),
"mean_taker_imbalance": float(trigger["mean_taker_imbalance"]),
"taker_imbalance_dispersion": float(trigger["taker_imbalance_dispersion"]),
"positive_taker_ratio": float(trigger["positive_taker_ratio"]),
"mean_alt_volume_accel": float(trigger["mean_alt_volume_accel"]),
"positive_volume_accel_ratio": float(trigger["positive_volume_accel_ratio"]),
"funding_dispersion": float(trigger["funding_dispersion"]),
"basis_dispersion": float(trigger["basis_dispersion"]),
"alt_return_dispersion_7d": float(trigger["alt_return_dispersion_7d"]),
"mean_funding_acceleration": float(trigger["mean_funding_acceleration"]),
"mean_basis_trend": float(trigger["mean_basis_trend"]),
"panic_score": float(trigger["panic_score"]),
"choppy_score": float(trigger["choppy_score"]),
"distribution_score": float(trigger["distribution_score"]),
"cash_pct": float(trigger["cash_pct"]),
"invested_pct": float(trigger["invested_pct"]),
"trailing_total_21": float(trigger["trailing_total_21"]) if pd.notna(trigger["trailing_total_21"]) else 0.0,
"trailing_total_42": float(trigger["trailing_total_42"]) if pd.notna(trigger["trailing_total_42"]) else 0.0,
"trailing_core_score_21": float(trigger["trailing_core_score_21"]) if pd.notna(trigger["trailing_core_score_21"]) else 0.0,
"trailing_breadth_21": float(trigger["trailing_breadth_21"]) if pd.notna(trigger["trailing_breadth_21"]) else 0.0,
"trailing_choppy_21": float(trigger["trailing_choppy_21"]) if pd.notna(trigger["trailing_choppy_21"]) else 0.0,
"block_start_index": int(start),
"block_end_index": int(block.index[-1]),
}
for column in regime_columns:
row[column] = float(trigger.get(column, 0.0))
rows.append(row)
return pd.DataFrame(rows)
def _feature_columns(regime_columns: list[str]) -> list[str]:
return [
"core_score",
"breadth_persist",
"funding_persist",
"taker_persist",
"volume_accel_persist",
"mean_taker_imbalance",
"taker_imbalance_dispersion",
"positive_taker_ratio",
"mean_alt_volume_accel",
"positive_volume_accel_ratio",
"funding_dispersion",
"basis_dispersion",
"alt_return_dispersion_7d",
"mean_funding_acceleration",
"mean_basis_trend",
"panic_score",
"choppy_score",
"distribution_score",
"cash_pct",
"invested_pct",
"trailing_total_21",
"trailing_total_42",
"trailing_core_score_21",
"trailing_breadth_21",
"trailing_choppy_21",
*regime_columns,
]
def _simulate_candidate(
    detail: pd.DataFrame,
    block_frame: pd.DataFrame,
    regime_columns: list[str],
    candidate: LearnedBlockerCandidate,
) -> pd.Series:
    """Replay the strategy with the candidate's walk-forward block gate.

    After a warm-up of ``train_min_blocks`` blocks, each block gets a ridge
    prediction fit on the preceding (up to ``lookback_blocks``) block
    outcomes; when the predicted block return is at or below
    ``prediction_threshold`` the block's bar returns are scaled by
    ``blocked_scale``. Earlier blocks pass through unscaled.

    Returns the gated per-bar return series indexed by timestamp.
    """
    rows = detail.reset_index(drop=True)
    features = _feature_columns(regime_columns)
    returns: list[float] = []
    idx: list[pd.Timestamp] = []
    # enumerate() gives the block's POSITION; the original used the frame's
    # index label from iterrows(), which silently assumed block_frame has a
    # clean RangeIndex (it was used both as a count and in iloc slicing).
    for block_pos, (_, block) in enumerate(block_frame.iterrows()):
        start_idx = int(block["block_start_index"])
        end_idx = int(block["block_end_index"])
        bar_block = rows.iloc[start_idx : end_idx + 1]
        exposure_scale = 1.0
        if block_pos >= candidate.train_min_blocks:
            train_start = max(0, block_pos - candidate.lookback_blocks)
            # Train strictly on blocks that completed before this one.
            train = block_frame.iloc[train_start:block_pos]
            train_x = train[features].to_numpy(dtype=float)
            train_y = train["block_total"].to_numpy(dtype=float)
            test_x = block[features].to_numpy(dtype=float)
            pred = _ridge_predict(train_x, train_y, test_x, candidate.ridge_alpha)
            if pred <= candidate.prediction_threshold:
                exposure_scale = candidate.blocked_scale
        for row in bar_block.itertuples(index=False):
            returns.append(float(getattr(row, "portfolio_return")) * exposure_scale)
            idx.append(pd.Timestamp(getattr(row, "timestamp")))
    return pd.Series(returns, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
def _oracle_blocker_curve(detail: pd.DataFrame, block_bars: int) -> pd.Series:
rows = detail.reset_index(drop=True)
returns: list[float] = []
idx: list[pd.Timestamp] = []
for start in range(0, len(rows), block_bars):
block = rows.iloc[start : start + block_bars]
if block.empty:
continue
total = float((1.0 + block["portfolio_return"]).prod() - 1.0)
scale = 1.0 if total > 0.0 else 0.0
for row in block.itertuples(index=False):
returns.append(float(getattr(row, "portfolio_return")) * scale)
idx.append(pd.Timestamp(getattr(row, "timestamp")))
return pd.Series(returns, index=pd.DatetimeIndex(idx, name="timestamp"), dtype=float)
def _candidate_space() -> list[LearnedBlockerCandidate]:
    """Enumerate the hyper-parameter grid searched by main()."""
    block_bar_options = (21, 42, 84)
    train_options = (8, 12, 18)
    lookback_options = (24, 60)
    alpha_options = (0.5, 1.0, 5.0, 20.0)
    threshold_options = (-0.0025, 0.0, 0.0025, 0.0050, 0.0100)
    scale_options = (0.0, 0.25, 0.50)
    return [
        LearnedBlockerCandidate(
            block_bars=block_bars,
            train_min_blocks=train_min_blocks,
            lookback_blocks=lookback_blocks,
            ridge_alpha=ridge_alpha,
            prediction_threshold=prediction_threshold,
            blocked_scale=blocked_scale,
        )
        for block_bars in block_bar_options
        for train_min_blocks in train_options
        for lookback_blocks in lookback_options
        # Skip combinations whose training minimum exceeds the lookback.
        if lookback_blocks >= train_min_blocks
        for ridge_alpha in alpha_options
        for prediction_threshold in threshold_options
        for blocked_scale in scale_options
    ]
def main() -> None:
    """Run the learned-blocker grid search and write a JSON report.

    Pipeline: rebuild the current cash-overlay baseline over the trailing
    1825 days, compute hindsight "oracle" upper bounds for several block
    sizes, then grid-search LearnedBlockerCandidate settings, tracking the
    best-scoring candidate and a running top-5. The report is printed to
    stdout and saved to OUT_JSON.
    """
    bundle, latest_bar = load_component_bundle(CACHE_PATH)
    # 1825 days = 5 x 365-day years of evaluation history.
    eval_start = latest_bar - pd.Timedelta(days=1825)
    print("[phase] build current baseline", flush=True)
    components = build_cash_overlay_period_components(
        bundle=bundle,
        eval_start=eval_start,
        eval_end=latest_bar,
        profile_name=BEST_CASH_OVERLAY.regime_profile,
        core_filter=BEST_CASH_OVERLAY.core_filter,
        cap_engine=BEST_CASH_OVERLAY.cap_engine,
        chop_engine=BEST_CASH_OVERLAY.chop_engine,
        dist_engine=BEST_CASH_OVERLAY.dist_engine,
        core_config_overrides=CURRENT_OVERHEAT_OVERRIDES,
    )
    detail = _build_strategy_detail(components)
    # NOTE: mutates `detail` in place by appending one-hot regime columns.
    regime_columns = _build_regime_columns(detail)
    baseline_curve = _curve_from_returns(detail.set_index("timestamp")["portfolio_return"])
    baseline_windows, baseline_years, *_ = _metrics_for_curve(baseline_curve, latest_bar)
    # Hindsight upper bounds: skip every losing block perfectly.
    oracle_summary: dict[str, object] = {}
    for block in (21, 42, 84):
        oracle_curve = _curve_from_returns(_oracle_blocker_curve(detail, block))
        windows, years, score, negative_years, mdd_violations = _metrics_for_curve(oracle_curve, latest_bar)
        oracle_summary[f"oracle_block_{block}"] = {
            "score": score,
            "negative_years": negative_years,
            "mdd_violations": mdd_violations,
            "windows": windows,
            "years": years,
        }
    candidates = _candidate_space()
    print(f"[phase] blocker search {len(candidates)} candidates", flush=True)
    best_payload: dict[str, object] | None = None
    search_top: list[dict[str, object]] = []
    for idx, candidate in enumerate(candidates, start=1):
        block_frame = _build_block_dataset(detail, candidate.block_bars, regime_columns)
        simulated_returns = _simulate_candidate(detail, block_frame, regime_columns, candidate)
        curve = _curve_from_returns(simulated_returns)
        windows, years, score, negative_years, mdd_violations = _metrics_for_curve(curve, latest_bar)
        payload = {
            "candidate": asdict(candidate),
            "name": candidate.name,
            "score": score,
            "negative_years": negative_years,
            "mdd_violations": mdd_violations,
            "windows": windows,
            "years": years,
        }
        # Keep only the five best-scoring payloads seen so far.
        search_top.append(payload)
        search_top.sort(key=lambda item: float(item["score"]), reverse=True)
        search_top = search_top[:5]
        if best_payload is None or score > float(best_payload["score"]):
            best_payload = payload
        # Progress heartbeat, roughly eight times over the whole search.
        if idx % max(1, len(candidates) // 8) == 0:
            print(f"[search] {idx}/{len(candidates)}", flush=True)
    assert best_payload is not None
    output = {
        "analysis": "current_cash_learned_blocker",
        "latest_bar": str(latest_bar),
        **best_payload,
        "baseline": {
            "windows": baseline_windows,
            "years": baseline_years,
        },
        "oracle": oracle_summary,
        "search_top": search_top,
    }
    print(json.dumps(output, indent=2))
    OUT_JSON.write_text(json.dumps(output, indent=2), encoding="utf-8")
    print(f"[saved] {OUT_JSON}", flush=True)
# Script entry point.
if __name__ == "__main__":
    main()