Replicating Hamilton 1989 Markov Switching Model in PyMC3

Problem description

I am trying to understand Hamilton's (1989) Markov-switching autoregressive model, and to reproduce its results with a Bayesian twist. Following Eric Ma's tutorial about HMMs in PyMC3, I have written a number of PyMC3 models; the latest iteration can be found below.

Without the autoregression, the model converges to mu values close to Hamilton's (1.16 and -0.36) and to realistic transition probabilities. However, once the autoregression is added, the model fails to converge and does not fit coefficients close to Hamilton's results. The fit of the transition probabilities is especially poor.
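
For reference, the specification I am trying to reproduce is Hamilton's two-regime switching AR(4) for the growth rate of real GNP:

y_t − μ_{s_t} = φ_1 (y_{t−1} − μ_{s_{t−1}}) + … + φ_4 (y_{t−4} − μ_{s_{t−4}}) + ε_t,   ε_t ~ N(0, σ²),

where s_t ∈ {0, 1} follows a two-state Markov chain. (Hamilton keeps a single σ; the model below lets σ switch with the state as well.)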

What am I missing?

# %%
import pymc3 as pm
import theano.tensor as tt
import theano.tensor.slinalg as sla  # theano-wrapped scipy linear algebra
import theano.tensor.nlinalg as nla  # theano-wrapped numpy linear algebra
import theano

theano.config.gcc.cxxflags = "-Wno-c++11-narrowing"

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

dta = pd.read_stata('https://www.stata-press.com/data/r14/rgnp.dta').iloc[1:]
dta.index = pd.DatetimeIndex(dta.date,freq='QS')
dta_hamilton = dta.rgnp

# Plot the data
dta_hamilton.plot(title='Growth rate of Real GNP',figsize=(12,3))

# %%
fig, ax = plt.subplots(figsize=(12, 4))
# plt.plot(np.round(trace["hmm_states"].mean(axis=0)), label="inferred")
plt.plot(dta_hamilton.values, label="true")

# %%
def solve_equilibrium(n_states, p_transition):
    """Solve for the stationary (equilibrium) distribution of the transition matrix."""
    # The stationary distribution pi satisfies pi @ P = pi with pi.sum() == 1,
    # which is equivalent to solving A.T @ pi = 1 with A = I - P + J (J = all-ones).
    A = tt.eye(n_states) - p_transition + tt.ones(shape=(n_states, n_states))
    p_equilibrium = pm.Deterministic("p_equilibrium", sla.solve(A.T, tt.ones(shape=(n_states,))))
    return p_equilibrium
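
# Sanity check (illustrative, with a made-up transition matrix): the same
# linear system recovers the stationary distribution, i.e. the pi satisfying
# pi @ P = pi and pi.sum() == 1.
P_demo = np.array([[0.90, 0.10],
                   [0.25, 0.75]])
A_demo = np.eye(2) - P_demo + np.ones((2, 2))
print(np.linalg.solve(A_demo.T, np.ones(2)))  # -> approx. [0.714, 0.286]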

class HMMStates(pm.Categorical):
    def __init__(self,p_transition,p_equilibrium,n_states,*args,**kwargs):
        """You can ignore this section for the time being."""
        # Calls pm.Discrete's __init__ (skips pm.Categorical's, which would require p).
        super(pm.Categorical, self).__init__(*args, **kwargs)
        self.p_transition = p_transition
        self.p_equilibrium = p_equilibrium
        # Number of states; pm.Categorical expects this attribute as `k`.
        self.k = n_states
        # This is only needed because discrete distributions must define a mode.
        self.mode = tt.cast(0,dtype='int64')

    def logp(self,x):
        """Focus your attention here!"""
        p_eq = self.p_equilibrium
        # Broadcast out the transition probabilities so that we can broadcast
        # the calculation of log-likelihoods.
        p_tr = self.p_transition[x[:-1]]

        # the logp of the initial state evaluated against the equilibrium probabilities
        initial_state_logp = pm.Categorical.dist(p_eq).logp(x[0])

        # the logp of the rest of the states.
        x_i = x[1:]
        ou_like = pm.Categorical.dist(p_tr).logp(x_i)
        transition_logp = tt.sum(ou_like)
        return initial_state_logp + transition_logp
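
# Sanity check (illustrative, with made-up values): the state log-likelihood
# above is just log(pi[x_0]) + sum_t log(P[x_{t-1}, x_t]), which for a short
# chain can be verified directly with numpy.
x_demo = np.array([0, 1, 1])
P_chain = np.array([[0.90, 0.10],
                    [0.25, 0.75]])
pi_demo = np.array([5 / 7, 2 / 7])
print(np.log(pi_demo[x_demo[0]]) + np.log(P_chain[x_demo[:-1], x_demo[1:]]).sum())  # ~ -2.93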

# %%
class HamiltonEmissions(pm.Continuous):
    def __init__(self, states, phi, sigma, mu, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.states = states
        self.phi = phi
        self.mu = mu
        self.sigma = sigma  # standard deviation (used as Normal's sigma below)

    def logp(self,x):
        """
        x: observations
        """
        states = self.states
        sigma = self.sigma[states]
        mu = self.mu[states]
        phi = self.phi

        z = x - mu  # Centered version of x

        # AR(4) mean for x[4:]: with this slicing, phi[0] multiplies the lag-4
        # term of the centered series z and phi[3] the lag-1 term.
        ar_mean = (
            phi[0] * z[0:-4]
            + phi[1] * z[1:-3]
            + phi[2] * z[2:-2]
            + phi[3] * z[3:-1]
        )

        ar_like = tt.sum(pm.Normal.dist(mu=ar_mean + mu[4:], sigma=sigma[4:]).logp(x[4:]))

        # The first four observations have no full AR history; score them
        # against a plain Normal centred at zero.
        boundary_like = tt.sum(pm.Normal.dist(mu=0, sigma=sigma[:4]).logp(x[:4]))
        return ar_like + boundary_like
# %%
n_states = 2
with pm.Model() as model:
    # Priors for transition matrix
    p_transition = pm.Dirichlet("p_transition",a=tt.ones((n_states,n_states)),shape=(n_states,n_states))

    # Solve for the equilibrium state
    p_equilibrium = solve_equilibrium(n_states,p_transition)

    # HMM state
    hmm_states = HMMStates(
        "hmm_states",p_transition=p_transition,p_equilibrium=p_equilibrium,n_states=n_states,shape=(len(dta_hamilton),)
    )

    # Priors for mu, sigma and the AR coefficients
    mu = pm.Normal("mu", mu=0, sigma=1, shape=(n_states,))
    sigma = pm.Exponential("sigma", lam=2, shape=(n_states,))
    phi = pm.Normal("phi", mu=0, sigma=0.5, shape=(4,))

    # Observed emission likelihood
    obs = HamiltonEmissions(
        "emission",states=hmm_states,mu=mu,sigma=sigma,phi=phi,observed=dta_hamilton
    )
# %%
with model:
    start = pm.find_MAP()
    step1 = pm.Metropolis(vars=[p_transition, mu, sigma, phi])  # continuous parameters
    step2 = pm.BinaryGibbsMetropolis(vars=[hmm_states])         # discrete state sequence
    trace = pm.sample(2500,cores=1,chains=2,step=[step1,step2],tune=1500)

# %%
import arviz as az
az.plot_trace(trace,var_names=["p_transition"])
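
# %%
# To compare against Hamilton's reported estimates, a posterior summary of the
# key parameters can be printed (az.summary is part of ArviZ):
az.summary(trace, var_names=["mu", "phi", "sigma", "p_transition"])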

Solution

The pymc3-hmm package provides a forward-filtering backward-sampling (FFBS) sampler. That may be better suited to your problem.
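
Below is a rough sketch of how the hidden states could be sampled with that package. It assumes the DiscreteMarkovChain, SwitchingProcess and FFBSStep names and signatures as given in the pymc3-hmm README, so please check the package documentation for the exact API; the emission here is the simple switching-mean model (no AR terms), which your AR(4) likelihood would replace.

import numpy as np
import pymc3 as pm
import theano.tensor as tt
from pymc3_hmm.distributions import DiscreteMarkovChain, SwitchingProcess
from pymc3_hmm.step_methods import FFBSStep

y = dta_hamilton.values

with pm.Model() as ffbs_model:
    # One Dirichlet prior per row of the 2x2 transition matrix.
    p_0 = pm.Dirichlet("p_0", a=np.r_[1.0, 1.0])
    p_1 = pm.Dirichlet("p_1", a=np.r_[1.0, 1.0])
    # A leading (time) axis of length 1 broadcasts the same matrix over all steps.
    P = pm.Deterministic("P", tt.shape_padleft(tt.stack([p_0, p_1])))

    # Hidden regime sequence; pi_0 is the initial-state distribution.
    pi_0 = tt.as_tensor_variable(np.r_[0.5, 0.5])
    states = DiscreteMarkovChain("states", P, pi_0, shape=y.shape[0])

    # Switching-mean emission; replace with the AR(4) likelihood as needed.
    mu = pm.Normal("mu", mu=0.0, sigma=1.0, shape=2)
    sigma = pm.Exponential("sigma", lam=2.0)
    obs = SwitchingProcess(
        "obs",
        [pm.Normal.dist(mu[0], sigma), pm.Normal.dist(mu[1], sigma)],
        states,
        observed=y,
    )

with ffbs_model:
    # Draw the whole state sequence jointly by forward-filtering
    # backward-sampling; NUTS picks up the remaining continuous parameters.
    states_step = FFBSStep([states])
    trace_ffbs = pm.sample(1000, tune=1000, step=[states_step], cores=1, chains=2)

Sampling the whole state sequence in one block this way usually mixes much better than flipping individual states with a Gibbs/Metropolis step.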