Python Implementation Examples
The code examples below provide a reference implementation for each component; each section ends with a short usage demonstration.
Daily IC Calculation
import numpy as np
import pandas as pd
def calculate_daily_ic(peak_ic, peak_horizon, rise_hl, decay_hl, horizon=1):
    """
    Evaluate an IC rise/decay curve at a given horizon.

    The curve rises toward ``peak_ic`` with rate constant ``rise_hl`` and,
    beyond ``peak_horizon``, decays with rate constant ``decay_hl``.

    Parameters
    ----------
    peak_ic : float
        Information coefficient at the peak horizon.
    peak_horizon : int
        Horizon (in days) where the IC peaks.
    rise_hl : float
        Rise half-life in days; must be strictly positive.
    decay_hl : float
        Decay half-life in days; must be strictly positive.
    horizon : int, optional
        Horizon (in days) at which to evaluate the curve.  The default of
        1 reproduces the original daily-IC behavior.

    Returns
    -------
    float
        Information coefficient at ``horizon``.

    Raises
    ------
    ValueError
        If either half-life is not strictly positive (would otherwise
        divide by zero or flip the sign of the exponents).
    """
    if rise_hl <= 0 or decay_hl <= 0:
        raise ValueError("rise_hl and decay_hl must be strictly positive")
    # Saturating rise toward the peak value.
    rise_term = 1 - np.exp(-horizon / rise_hl)
    # Decay applies only past the peak horizon; before it the term is 1.
    decay_term = np.exp(-max(0, horizon - peak_horizon) / decay_hl)
    return peak_ic * rise_term * decay_term
# Example: a signal whose IC peaks at 0.05 around a 21-day horizon.
daily_ic = calculate_daily_ic(peak_ic=0.05,
                              peak_horizon=21,
                              rise_hl=5.0,
                              decay_hl=30.0)
print(f"Daily IC: {daily_ic:.4f}")
Signal Weight Computation
def compute_signal_weights(daily_ics, transaction_costs, shrinkage=0.4):
    """
    Compute signal weights from daily ICs with shrinkage toward equal weight.

    Parameters
    ----------
    daily_ics : np.ndarray
        Array of daily ICs for each signal.
    transaction_costs : np.ndarray
        Annual transaction costs for each signal (converted to daily by
        dividing by 252 trading days; assumed to be in the same units as
        the ICs after that conversion).
    shrinkage : float, optional
        Shrinkage toward equal weight: 0 gives pure IC-proportional
        weights, 1 gives pure equal weights (default: 0.4).

    Returns
    -------
    np.ndarray
        Final signal weights, summing to 1.
    """
    n_signals = len(daily_ics)
    equal_weights = np.full(n_signals, 1.0 / n_signals)
    # Net daily IC after costs, clipped at zero so no signal gets a
    # negative allocation.
    net_daily_ic = np.maximum(daily_ics - transaction_costs / 252, 0)
    total = net_daily_ic.sum()
    if total > 0:
        ic_weights = net_daily_ic / total
    else:
        # Every signal is unprofitable after costs.  The unguarded
        # division would produce NaN weights (0/0); fall back to equal
        # weighting instead.
        ic_weights = equal_weights
    # Blend IC-proportional weights with the equal-weight prior.
    return shrinkage * equal_weights + (1 - shrinkage) * ic_weights
# Example: four signals, from fast mean-reversion to slow carry.
signal_names = ['fast_mr', 'med_mom', 'slow_mom', 'carry']
example_ics = np.array([0.035, 0.012, 0.004, 0.002])
example_costs = np.array([0.50, 0.30, 0.20, 0.10])  # annual %
example_weights = compute_signal_weights(example_ics, example_costs,
                                         shrinkage=0.4)
for name, wt in zip(signal_names, example_weights):
    print(f"{name:12s}: {wt:.1%}")
Covariance Matrix Estimation
def estimate_covariance(returns, halflife=21, min_periods=252):
    """
    Estimate the exponentially weighted covariance matrix of returns.

    Parameters
    ----------
    returns : pd.DataFrame
        DataFrame of asset returns (dates x assets).
    halflife : int
        Half-life (in periods) for the exponential weighting.
    min_periods : int
        Minimum number of periods required before an estimate is
        produced; earlier entries are NaN.

    Returns
    -------
    pd.DataFrame
        Covariance matrix as of the most recent date (assets x assets).
    """
    # pandas converts halflife to the decay factor
    # alpha = 1 - exp(-ln(2) / halflife), which is exactly the manual
    # conversion this function previously performed — so passing
    # halflife directly is equivalent and more idiomatic.
    ewm_cov = returns.ewm(
        halflife=halflife,
        min_periods=min_periods,
        adjust=False,
    ).cov()
    # The EWM covariance is indexed by (date, asset); slice out the
    # cross-section at the latest date.
    return ewm_cov.loc[returns.index[-1]]
# Example: simulated daily returns for three futures contracts.
example_returns = pd.DataFrame({
    'ES': np.random.randn(500) * 0.01,
    'TY': np.random.randn(500) * 0.005,
    'GC': np.random.randn(500) * 0.015,
})
example_cov = estimate_covariance(example_returns, halflife=21)
print("Covariance Matrix:")
print(example_cov)
Portfolio Optimization with Constraints
from scipy.optimize import minimize
def optimize_portfolio(expected_returns, cov_matrix, risk_aversion=1.0,
                       constraints=None, bounds=None):
    """
    Mean-variance portfolio optimization via SLSQP.

    Maximizes ``w @ mu - risk_aversion * (w @ Sigma @ w)`` subject to the
    supplied (or default) constraints and bounds.

    Parameters
    ----------
    expected_returns : np.ndarray
        Expected returns for each asset.
    cov_matrix : np.ndarray
        Covariance matrix.
    risk_aversion : float
        Risk aversion parameter (lambda).
    constraints : list, optional
        scipy-style constraint dictionaries; defaults to a single
        equality constraint forcing the weights to sum to 1.
    bounds : tuple, optional
        Per-weight bounds; defaults to (0, 1) for every asset,
        i.e. long-only and fully invested.

    Returns
    -------
    dict
        Keys: 'weights', 'expected_return', 'volatility', 'sharpe_ratio'.

    Raises
    ------
    ValueError
        If the optimizer does not converge.
    """
    n_assets = len(expected_returns)

    def neg_utility(w):
        # Negated utility, because scipy minimizes.
        return risk_aversion * (w @ cov_matrix @ w) - w @ expected_returns

    if constraints is None:
        # Fully-invested default: weights must sum to one.
        constraints = [{'type': 'eq', 'fun': lambda w: np.sum(w) - 1}]
    if bounds is None:
        # Long-only default: each weight confined to [0, 1].
        bounds = tuple((0, 1) for _ in range(n_assets))

    result = minimize(
        neg_utility,
        np.full(n_assets, 1.0 / n_assets),  # start from equal weight
        method='SLSQP',
        bounds=bounds,
        constraints=constraints,
        options={'maxiter': 1000},
    )
    if not result.success:
        raise ValueError(f"Optimization failed: {result.message}")

    w = result.x
    mean_ret = w @ expected_returns
    vol = np.sqrt(w @ cov_matrix @ w)
    return {
        'weights': w,
        'expected_return': mean_ret,
        'volatility': vol,
        'sharpe_ratio': mean_ret / vol,
    }
# Example: three assets, moderately risk-averse investor (lambda = 2).
exp_returns = np.array([0.08, 0.06, 0.10])
cov = np.array([[0.04, 0.01, 0.02],
                [0.01, 0.02, 0.01],
                [0.02, 0.01, 0.06]])
portfolio = optimize_portfolio(exp_returns, cov, risk_aversion=2.0)
print(f"Optimal weights: {portfolio['weights']}")
print(f"Expected return: {portfolio['expected_return']:.2%}")
print(f"Volatility: {portfolio['volatility']:.2%}")
print(f"Sharpe ratio: {portfolio['sharpe_ratio']:.2f}")
Adaptive Covariance Scaling
class AdaptiveCovarianceScaler:
    """Scale a covariance matrix based on the recent volatility regime."""

    def __init__(self, fast_hl=5, slow_hl=42):
        """
        Initialize scaler.

        Parameters
        ----------
        fast_hl : int
            Fast window (in periods) used to measure recent volatility;
            drives the immediate response to volatility increases.
        slow_hl : int
            Slow half-life used to damp the response to volatility
            decreases.
        """
        self.fast_hl = fast_hl
        self.slow_hl = slow_hl

    def scale_covariance(self, cov_matrix, returns, lookback=252):
        """
        Scale a covariance matrix based on the volatility regime.

        Compares recent (last ``fast_hl`` periods) volatility to
        long-term (last ``lookback`` periods) volatility and scales the
        whole matrix by a single factor: ratio-squared when volatility
        rose, and a damped linear adjustment floored at 0.5 when it fell.

        Parameters
        ----------
        cov_matrix : np.ndarray or pd.DataFrame
            Base covariance matrix.
        returns : pd.DataFrame
            Recent returns data (dates x assets).
        lookback : int
            Lookback period for the long-term volatility estimate.

        Returns
        -------
        Same type as ``cov_matrix``
            Scaled covariance matrix.
        """
        # Per-asset volatilities over the fast and long-term windows.
        recent_vol = returns.tail(self.fast_hl).std()
        longterm_vol = returns.tail(lookback).std()
        # BUG FIX: .std() on a DataFrame returns a per-column Series, so
        # comparing the raw ratio against 1 raised "truth value of a
        # Series is ambiguous" for any multi-asset input.  Reduce the
        # per-asset ratios to one scalar regime measure instead.
        vol_ratio = float((recent_vol / longterm_vol).mean())
        if vol_ratio > 1:
            # Volatility increased: scale variances up immediately.
            scale_factor = vol_ratio ** 2
        else:
            # Volatility decreased: respond more slowly, floored at 50%.
            scale_factor = 1 + (vol_ratio - 1) * (self.fast_hl / self.slow_hl)
            scale_factor = max(scale_factor, 0.5)
        return cov_matrix * scale_factor
# Example: scale a sample covariance matrix with the adaptive scaler.
scaler = AdaptiveCovarianceScaler(fast_hl=5, slow_hl=42)
returns = pd.DataFrame({'asset1': np.random.randn(300) * 0.01,
                        'asset2': np.random.randn(300) * 0.01})
base_cov = returns.cov()
scaled_cov = scaler.scale_covariance(base_cov, returns)
print("Base covariance:")
print(base_cov)
print("\nScaled covariance:")
print(scaled_cov)
Backtesting Framework
class SimpleBacktest:
    """Simple backtesting framework for signal strategies."""

    def __init__(self, initial_capital=1_000_000):
        self.initial_capital = initial_capital
        self.positions = []  # reserved; not populated by run()
        self.trades = []     # one record dict per executed trade

    def run(self, signals, prices, transaction_cost=0.001):
        """
        Run the backtest: rebalance to the signal weights on every date.

        Parameters
        ----------
        signals : pd.DataFrame
            Target portfolio weights per asset (dates x assets).
        prices : pd.DataFrame
            Asset prices (dates x assets).
        transaction_cost : float
            Transaction cost as a fraction of traded value.

        Returns
        -------
        pd.Series
            Portfolio value over time; the first entry is the starting
            capital, indexed at the first signal date.
        """
        cash = self.initial_capital
        shares_held = {name: 0 for name in prices.columns}
        equity_curve = [self.initial_capital]

        for date in signals.index:
            day_prices = prices.loc[date]
            targets = signals.loc[date]

            # Mark the book to market before sizing the day's trades.
            nav = cash + sum(shares_held[name] * day_prices[name]
                             for name in shares_held)

            for name in targets.index:
                px = day_prices[name]
                # Trade the gap between target and current exposure.
                trade_value = nav * targets[name] - shares_held[name] * px
                if abs(trade_value) > 0:
                    # Costs are charged on the absolute traded value.
                    fee = abs(trade_value) * transaction_cost
                    cash -= fee
                    qty = trade_value / px
                    shares_held[name] += qty
                    cash -= trade_value
                    self.trades.append({
                        'date': date,
                        'asset': name,
                        'shares': qty,
                        'price': px,
                        'value': trade_value,
                        'cost': fee,
                    })

            # End-of-day portfolio value, net of costs.
            equity_curve.append(cash + sum(shares_held[name] * day_prices[name]
                                           for name in shares_held))

        # The starting capital is prepended, reusing the first signal
        # date as its index label.
        return pd.Series(equity_curve,
                         index=[signals.index[0]] + list(signals.index))
# Example: random daily signals over one trading year.
dates = pd.date_range('2020-01-01', periods=252, freq='D')
signals = pd.DataFrame({'ES': np.random.randn(252) * 0.1,
                        'TY': np.random.randn(252) * 0.1}, index=dates)
# Normalize each day's signals so absolute weights sum to 1.
signals = signals.div(signals.abs().sum(axis=1), axis=0)
prices = pd.DataFrame({'ES': 3000 + np.random.randn(252).cumsum() * 10,
                       'TY': 130 + np.random.randn(252).cumsum() * 0.5},
                      index=dates)
backtest = SimpleBacktest(initial_capital=1_000_000)
performance = backtest.run(signals, prices)
print(f"Initial value: ${performance.iloc[0]:,.0f}")
print(f"Final value: ${performance.iloc[-1]:,.0f}")
print(f"Total return: {(performance.iloc[-1] / performance.iloc[0] - 1):.2%}")
Add more code examples here as you develop your implementation!