# "SP500" does not contain dividends; note: "DTWEXM" discontinued as of Jan 2020
factors_r = ["SP500", "DTWEXAFEGS"]
factors_d = ["DGS10", "BAMLH0A0HYM2"]  # presumably differenced (yield/spread) factors — confirm upstream
tickers = ["BAICX"]  # fund inception date is "2011-11-28"
intercept = True
# Regression analysis
import math

import statsmodels.api as sm

# Ordinary least squares
Coefficients
\[ \begin{aligned} \hat{\beta}=(X^\mathrm{T}WX)^{-1}X^\mathrm{T}Wy \end{aligned} \]
def lm_coef(x, y, weights, intercept):
    """Weighted least-squares coefficients: beta = (X'WX)^{-1} X'Wy.

    Solves the normal equations with ``np.linalg.solve`` instead of forming
    the explicit inverse — numerically more stable and cheaper than
    ``inv(X'WX) @ X'Wy``, with identical results for well-posed problems.

    weights is expected as an (n, 1) column so it broadcasts across columns.
    """
    if intercept:
        x = sm.add_constant(x)
    xtwx = np.dot(x.T, np.multiply(weights, x))
    xtwy = np.dot(x.T, np.multiply(weights, y))
    return np.ravel(np.linalg.solve(xtwx, xtwy))

# lm_coef(overlap_x_df, overlap_y_df, weights, intercept)
# -> array([ 1.53609386e-04,  1.77989790e-01, -9.46831661e-02,  3.05095152e+00,
#            1.19191039e+00])
# Cross-check: statsmodels WLS reproduces the closed-form coefficients.
if intercept:
    overlap_x_df = sm.add_constant(overlap_x_df)
fit = sm.WLS(overlap_y_df, overlap_x_df, weights = weights).fit()
if intercept:
    overlap_x_df = overlap_x_df.iloc[:, 1:]  # drop the constant column again
# np.array(fit.params)
# -> array([ 1.53609386e-04,  1.77989790e-01, -9.46831661e-02,  3.05095152e+00,
#            1.19191039e+00])
R-squared
\[ \begin{aligned} R^{2}=\frac{\hat{\beta}^\mathrm{T}(X^\mathrm{T}WX)\hat{\beta}}{y^\mathrm{T}Wy} \end{aligned} \]
def lm_rsq(x, y, weights, intercept):
    """Weighted R-squared: explained weighted sum of squares over total.

    Implements R^2 = beta' (X'WX) beta / (y'Wy) on demeaned data.
    Rewritten with plain ndarrays — ``np.matrix`` is deprecated in NumPy.
    """
    coef = lm_coef(x, y, weights, intercept)
    if intercept:
        x = sm.add_constant(x)
    # Demean with the regression weights; the constant column becomes zero,
    # so the intercept coefficient drops out of the numerator.
    # NOTE(review): demeaning is applied regardless of `intercept` — confirm
    # that is intended for the no-intercept case.
    x = x - np.average(x, axis = 0, weights = weights.reshape(-1))
    y = y - np.average(y, axis = 0, weights = weights.reshape(-1))
    explained = np.dot(coef, np.dot(np.dot(x.T, np.multiply(weights, x)), coef))
    total = np.dot(y.T, np.multiply(weights, y))
    return np.float64(np.asarray(explained / total).item())

# lm_rsq(overlap_x_df, overlap_y_df, weights, intercept) -> np.float64(0.7255998953865602)
# fit.rsquared -> np.float64(0.7255998953865596)
Standard errors
\[ \begin{aligned} \sigma_{\hat{\beta}}^{2}&=\sigma_{\varepsilon}^{2}(X^\mathrm{T}WX)^{-1}\\ &=\frac{(1-R^{2})\,y^\mathrm{T}Wy}{n-p}(X^\mathrm{T}WX)^{-1}\\ &=\frac{SSE}{df_{E}}(X^\mathrm{T}WX)^{-1}\\ \sigma_{\hat{\alpha}}^{2}&=\sigma_{\varepsilon}^{2}\left(\frac{1}{n}+\mu^\mathrm{T}(X^\mathrm{T}WX)^{-1}\mu\right) \end{aligned} \]
def lm_se(x, y, weights, intercept):
    """Standard errors of the WLS coefficients.

    sigma_beta^2 = sigma_eps^2 * diag((X'WX)^{-1}), with the residual
    variance estimated as (1 - R^2) * y'Wy / df_resid.
    """
    n_rows, n_cols = x.shape
    rsq = lm_rsq(x, y, weights, intercept)
    if intercept:
        x = sm.add_constant(x)
        y = y - np.average(y, axis = 0, weights = weights.reshape(-1))
        df_resid = n_rows - n_cols - 1
    else:
        df_resid = n_rows - n_cols
    var_y = np.dot(y.T, np.multiply(weights, y))
    var_resid = (1 - rsq) * var_y / df_resid
    se = np.sqrt(var_resid * np.linalg.inv(np.dot(x.T, np.multiply(weights, x))).diagonal())
    return np.ravel(se)

# lm_se(overlap_x_df, overlap_y_df, weights, intercept)
# -> array([6.38441707e-05, 1.97318740e-02, 3.37317546e-02, 2.65647055e-01,
#           2.28577769e-01])
# np.array(fit.bse)
# -> array([6.38441707e-05, 1.97318740e-02, 3.37317546e-02, 2.65647055e-01,
#           2.28577769e-01])
Shapley values
\[ R^{2}_{i}=\sum_{S\subseteq N\setminus\{i\}}{\frac{|S|!\;(n-|S|-1)!}{n!}}(R^{2}(S\cup\{i\})-R^{2}(S)) \]
def lm_shap(x, y, weights, intercept):
    # Exact Shapley decomposition of R^2 across the columns of x:
    # R2_i = sum over subsets S not containing i of
    #        |S|! (n-|S|-1)! / n! * (R^2(S ∪ {i}) - R^2(S)).
    # x is indexed with .iloc below, so a pandas DataFrame is expected here.
    n_rows = x.shape[0]  # unused; kept for symmetry with the other lm_* helpers
    n_cols = x.shape[1]
    n_combn = 2 ** n_cols          # number of variable subsets
    n_vec = np.zeros(n_combn)      # subset size for each combination
    ix_mat = np.zeros((n_cols, n_combn))  # nonzero entry => column included
    rsq = np.zeros(n_combn)        # R^2 of each subset's regression
    result = np.zeros(n_cols)
    # number of binary combinations
    for k in range(n_combn):
        n = 0
        n_size = k
        # find the binary combination
        # NOTE: a ZERO bit marks an INCLUDED variable, so k = 0 is the full
        # model and k = n_combn - 1 the empty model.
        for j in range(n_cols):
            if n_size % 2 == 0:
                n += 1
                ix_mat[j, k] = j + 1
            n_size //= 2
        n_vec[k] = n
        if n > 0:
            ix_subset = np.where(ix_mat[:, k] != 0)[0]
            x_subset = x.iloc[:, ix_subset]
            rsq[k] = lm_rsq(x_subset, y, weights, intercept)
    # calculate the exact Shapley value for r-squared
    for j in range(n_cols):
        # Subsets including j (bit j = 0) pair one-to-one, in sorted order,
        # with subsets excluding j (bit j = 1) via k <-> k + 2**j, so the
        # elementwise difference below is R^2(S ∪ {j}) - R^2(S).
        ix_pos = np.where(ix_mat[j, :] != 0)[0]
        ix_neg = np.where(ix_mat[j, :] == 0)[0]
        ix_n = n_vec[ix_neg]  # |S| for each subset S excluding j
        rsq_diff = rsq[ix_pos] - rsq[ix_neg]
        for k in range(int(n_combn / 2)):
            s = int(ix_n[k])
            # Shapley kernel weight: |S|! (n - |S| - 1)! / n!
            weight = math.factorial(s) * math.factorial(n_cols - s - 1) \
                / math.factorial(n_cols)
            result[j] += weight * rsq_diff[k]
    return result

# lm_shap(overlap_x_df, overlap_y_df, weights, intercept)
# -> array([0.32792107, 0.01160852, 0.13432931, 0.251741  ])
Principal component regression
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression

comps = 1  # number of principal components retained
# Coefficients
\[ \begin{aligned} W_{k}&=\mathbf{X}V_{k}=[\mathbf{X}\mathbf{v}_{1},\ldots,\mathbf{X}\mathbf{v}_{k}]\\ {\widehat{\gamma}}_{k}&=\left(W_{k}^\mathrm{T}W_{k}\right)^{-1}W_{k}^\mathrm{T}\mathbf{Y}\\ {\widehat{\boldsymbol{\beta}}}_{k}&=V_{k}{\widehat{\gamma}}_{k} \end{aligned} \]
def pcr_coef(x, y, comps):
    """Principal-component-regression coefficients mapped back to x space.

    beta_k = V_k * gamma_k with gamma = (W'W)^{-1} W'y and scores W = X V.

    Uses ``np.linalg.eigh`` (symmetric eigensolver) on the covariance matrix
    instead of general-purpose ``eig``: eigenvalues are guaranteed real and
    the decomposition is numerically stable for symmetric input.
    """
    x = x - np.average(x, axis = 0)
    eigval, eigvec = np.linalg.eigh(np.cov(x.T, ddof = 1))
    order = np.argsort(eigval)[::-1]  # descending by explained variance
    eigvec = eigvec[:, order]
    scores = np.dot(x, eigvec)
    gamma = np.dot(np.dot(np.linalg.inv(np.dot(scores.T, scores)), scores.T), y)
    result = np.dot(eigvec[:, :comps], gamma[:comps])
    return np.ravel(result)
scale_x_df = (overlap_x_df - np.average(overlap_x_df, axis = 0)) \
    / np.std(overlap_x_df, axis = 0, ddof = 1)
# pcr_coef(scale_x_df, overlap_y_df, comps)
# -> array([ 6.31488384e-04,  3.20873608e-05, -1.55003056e-04,  6.37534918e-04])
# pcr_coef(overlap_x_df, overlap_y_df, comps)
# -> array([ 0.2516992 ,  0.00196746, -0.00073302,  0.01786965])
# Cross-check with scikit-learn: PCA scores, then OLS without an intercept.
pca = PCA(n_components = len(factors))
pca_x_df = pca.fit_transform(scale_x_df)
fit = LinearRegression(fit_intercept = False).fit(pca_x_df, overlap_y_df)
gamma = fit.coef_
# np.dot(pca.components_.T[:, :comps], gamma.T[:comps]).ravel()
# -> array([ 6.31488384e-04,  3.20873608e-05, -1.55003056e-04,  6.37534918e-04])
R-squared
def pcr_rsq(x, y, comps):
    """R-squared of the PCR fit with `comps` components, on centered data.

    R^2 = beta' (X'X) beta / (y'y) with X and y demeaned.
    Rewritten with plain ndarrays — ``np.matrix`` is deprecated in NumPy.
    """
    coef = pcr_coef(x, y, comps)
    x = x - np.average(x, axis = 0)
    y = y - np.average(y, axis = 0)
    explained = np.dot(coef, np.dot(np.dot(x.T, x), coef))
    total = np.dot(y.T, y)
    return np.float64(np.asarray(explained / total).item())

# pcr_rsq(scale_x_df, overlap_y_df, comps) -> np.float64(0.46581979359648357)
# pcr_rsq(overlap_x_df, overlap_y_df, comps) -> np.float64(0.5472476072565008)
Standard errors
\[ \begin{aligned} \text{Var}({\widehat{\boldsymbol{\beta}}}_{k})&=\sigma^{2}V_{k}(W_{k}^\mathrm{T}W_{k})^{-1}V_{k}^\mathrm{T}\\ &=\sigma^{2}V_{k}\text{diag}\left(\lambda_{1}^{-1},\ldots,\lambda_{k}^{-1}\right)V_{k}^\mathrm{T}\\ &=\sigma^{2}\sum_{j=1}^{k}{\frac{\mathbf{v}_{j}\mathbf{v}_{j}^\mathrm{T}}{\lambda_{j}}} \end{aligned} \]
# NOTE: unable to verify the result against an external implementation.
def pcr_se(x, y, comps):
    """Approximate PCR standard errors.

    Var(beta_k) = sigma_eps^2 * V_k diag(1/lambda) V_k', with the residual
    variance estimated from the PCR R^2 as (1 - R^2) * y'y / df_resid.
    """
    n_obs, n_feat = x.shape
    rsq = pcr_rsq(x, y, comps)
    y_c = y - np.average(y, axis = 0)
    df_resid = n_obs - n_feat - 1
    var_resid = (1 - rsq) * np.dot(y_c.T, y_c) / df_resid
    # uses statsmodels for illustrative purposes
    pca = sm.multivariate.PCA(x, standardize = False, demean = True)
    eigvals = pca.eigenvals[:comps]
    eigvecs = pca.eigenvecs.iloc[:, :comps]
    cov_beta = np.dot(eigvecs, np.dot(np.diag(1 / eigvals), eigvecs.T))
    return np.ravel(np.sqrt(var_resid * cov_beta.diagonal()))

# pcr_se(scale_x_df, overlap_y_df, comps)
# -> array([4.30280650e-05, 2.18635383e-06, 1.05615269e-05, 4.34400609e-05])
# pcr_se(overlap_x_df, overlap_y_df, comps)
# -> array([1.45670563e-02, 1.13866291e-04, 4.24232577e-05, 1.03420325e-03])
Partial least squares
Risk decomposition
Standalone risk
\[ \begin{aligned} \text{SAR}_{k}&=\sqrt{w_{k}^{2}\sigma_{k}^{2}}\\ \text{SAR}_{\varepsilon}&=\sqrt{(1-R^{2})\sigma_{y}^{2}} \end{aligned} \]
def cov_wt(x, weights, center):
    """Frequency-weighted covariance of the columns of x.

    Normalizes by sum_w - sumsq_w / sum_w, which reduces to n - 1 for unit
    weights (matching ``np.cov(..., ddof=1)``). `center` toggles demeaning.
    """
    sum_w = sum(weights)
    sumsq_w = sum(np.power(weights, 2))
    if center:
        x = x - np.average(x, axis = 0, weights = weights.reshape(-1))
    return np.dot(x.T, np.multiply(weights, x)) / (sum_w - sumsq_w / sum_w)

def lm_sar(x, y, weights, intercept):
    """Standalone risk: [sigma_y, per-coefficient SAR (incl. constant when
    intercept=True), residual SAR], each as a volatility (sqrt of variance).

    SAR_k = sqrt(beta_k^2 * sigma_k^2); SAR_eps = sqrt((1 - R^2) * sigma_y^2).
    Rewritten with plain ndarrays — ``np.matrix`` is deprecated in NumPy.
    """
    coef = lm_coef(x, y, weights, intercept)
    rsq = lm_rsq(x, y, weights, intercept)
    if intercept:
        x = sm.add_constant(x)
    # `intercept` is passed as `center`: demean the covariance only when the
    # model has an intercept (otherwise uncentered second moments are used).
    sigma = cov_wt(np.concatenate((x, y), axis = 1), weights, intercept)
    sar = np.power(coef, 2) * sigma[:-1, :-1].diagonal()
    sar_eps = (1 - rsq) * sigma[-1, -1]
    return np.sqrt(np.concatenate(([sigma[-1, -1]], np.ravel(sar), [sar_eps])))

# lm_sar(overlap_x_df, overlap_y_df, weights, intercept) * np.sqrt(scale["periods"] * scale["overlap"])
# -> array([0.06427939, 0.        , 0.0334555 , 0.00604166, 0.02500491,
#           0.01947359, 0.03367161])
Risk contribution
\[ \begin{aligned} \text{MCR}&=w\circ\frac{\partial\sigma_{y}}{\partial w}\\ &=w\circ\frac{\Sigma w}{\sigma_{y}}\\ \text{MCR}_{\varepsilon}&=\sigma_{y}-\sum_{k=1}^{n}\text{MCR}_{k} \end{aligned} \]
def lm_mcr(x, y, weights, intercept):
    """Risk contributions: [sigma_y, MCR per coefficient, residual MCR].

    MCR_k = beta_k * (Sigma beta)_k / sigma_y (elementwise product), and
    MCR_eps = sigma_y - sum(MCR), so the entries after the first sum to
    sigma_y. Rewritten with plain ndarrays (``np.matrix`` is deprecated);
    the unused R^2 computation in the original was removed (lm_rsq is pure).
    """
    coef = lm_coef(x, y, weights, intercept)
    if intercept:
        x = sm.add_constant(x)
    # See lm_sar: `intercept` doubles as the `center` flag for cov_wt.
    sigma = cov_wt(np.concatenate((x, y), axis = 1), weights, intercept)
    sigma_y = np.sqrt(sigma[-1, -1])
    mcr = np.ravel(coef * np.dot(sigma[:-1, :-1], coef)) / sigma_y
    mcr_eps = sigma_y - mcr.sum()
    return np.concatenate(([sigma_y], mcr, [mcr_eps]))

# lm_mcr(overlap_x_df, overlap_y_df, weights, intercept) * np.sqrt(scale["periods"] * scale["overlap"])
# -> array([0.06427939, 0.        , 0.02474778, 0.00070433, 0.0080525 ,
#           0.01313651, 0.01763827])
Scenario analysis
Implied shocks
\[ \begin{aligned} \hat{\beta}&=(Z^\mathrm{T}WZ)^{-1}Z^\mathrm{T}WX \end{aligned} \]
def implied_shocks(shocks, x, z, weights):
    """Implied shocks on x from shocks on z via the WLS projection of x on z.

    Fix: ``lstsq(W z, W x)`` solves (Z'W^2 Z)^{-1} Z'W^2 X — the weights were
    applied twice. Scaling both sides by sqrt(W) makes lstsq solve the stated
    normal equations beta = (Z'WZ)^{-1} Z'WX, consistent with lm_coef.
    (Unchanged for 0/1 or uniform weights.)
    """
    root_w = np.sqrt(weights)
    beta = np.linalg.lstsq(np.multiply(root_w, z), np.multiply(root_w, x), rcond = None)[0]
    return np.dot(shocks, beta)
shocks = np.array([-0.1, 0.1])
overlap_z_df = overlap_x_df.iloc[:, [0, 1]]
# implied_shocks(shocks, overlap_x_df, overlap_z_df, weights)  (recorded notebook output)
# -> array([-0.1       ,  0.1       , -0.00131846, -0.00678959])
Stress P&L
def pnl_stress(shocks, x, y, z, weights, intercept):
    """Stress P&L: each coefficient times the implied shock on its factor."""
    coef = lm_coef(x, y, weights, intercept)
    if intercept:
        x = sm.add_constant(x)
    return np.multiply(coef.T, implied_shocks(shocks, x, z, weights))

# pnl_stress(shocks, overlap_x_df, overlap_y_df, overlap_z_df, weights, intercept)
# -> array([-0.00218203, -0.01779898, -0.00946832, -0.00402257, -0.00809259])