    //! default constructor
    eEF ( ) : epdf ( ) {};
    //! logarithm of the normalizing constant, \f$\mathcal{I}\f$
    virtual double lognc() const = 0;
    //! TODO decide if it is really needed
    virtual void dupdate ( mat &v ) {it_error ( "Not implemented" );};
    //! Evaluate unnormalized log-probability
    virtual double evallog_nn ( const vec &val ) const {it_error ( "Not implemented" ); return 0.0;};
    //! Evaluate normalized log-probability
    virtual double evallog ( const vec &val ) const {double tmp; tmp = evallog_nn ( val ) - lognc(); it_assert_debug ( std::isfinite ( tmp ), "Infinite value" ); return tmp;}
    //! Evaluate normalized log-probability for many samples
    virtual vec evallog ( const mat &Val ) const
    {
        vec x ( Val.cols() );
        for ( int i = 0; i < Val.cols(); i++ ) {x ( i ) = evallog_nn ( Val.get_col ( i ) );}
        return x - lognc();
    }
    //! Power of the density, used e.g. to flatten the density
    virtual void pow ( double p ) {it_error ( "Not implemented" );};
};

/*!
 * \brief Exponential family model.

 * More?...
 */

class mEF : public mpdf
{

public:
    //! Default constructor
    mEF ( ) : mpdf ( ) {};
};
| 76 | |
| 77 | //! Estimator for Exponential family |
| 78 | class BMEF : public BM |
| 79 | { |
| 80 | protected: |
| 81 | //! forgetting factor |
| 82 | double frg; |
| 83 | //! cached value of lognc() in the previous step (used in evaluation of \c ll ) |
| 84 | double last_lognc; |
| 85 | public: |
| 86 | //! Default constructor (=empty constructor) |
| 87 | BMEF ( double frg0=1.0 ) :BM ( ), frg ( frg0 ) {} |
| 88 | //! Copy constructor |
| 89 | BMEF ( const BMEF &B ) :BM ( B ), frg ( B.frg ), last_lognc ( B.last_lognc ) {} |
| 90 | //!get statistics from another model |
| 91 | virtual void set_statistics ( const BMEF* BM0 ) {it_error ( "Not implemented" );}; |
| 92 | //! Weighted update of sufficient statistics (Bayes rule) |
| 93 | virtual void bayes ( const vec &data, const double w ) {}; |
| 94 | //original Bayes |
| 95 | void bayes ( const vec &dt ); |
| 96 | //!Flatten the posterior according to the given BMEF (of the same type!) |
| 97 | virtual void flatten ( const BMEF * B ) {it_error ( "Not implemented" );} |
| 98 | //!Flatten the posterior as if to keep nu0 data |
| 99 | // virtual void flatten ( double nu0 ) {it_error ( "Not implemented" );} |
| 100 | |
| 101 | BMEF* _copy_ () const {it_error ( "function _copy_ not implemented for this BM" ); return NULL;}; |
| 102 | }; |
| 103 | |
template<class sq_T>
class mlnorm;

/*!
 * \brief Gaussian density with positive definite (decomposed) covariance matrix.

 * More?...
 */
template<class sq_T>
class enorm : public eEF
{
protected:
    //! mean value
    vec mu;
    //! Covariance matrix in decomposed form
    sq_T R;
public:
    //!\name Constructors
    //!@{

    enorm ( ) : eEF ( ), mu ( ), R ( ) {};
    enorm ( const vec &mu, const sq_T &R ) {set_parameters ( mu, R );}
    void set_parameters ( const vec &mu, const sq_T &R );
    //!@}

    //! \name Mathematical operations
    //!@{

    //! dupdate in exponential form (not really handy)
    void dupdate ( mat &v, double nu = 1.0 );

    vec sample() const;
    mat sample ( int N ) const;
    double evallog_nn ( const vec &val ) const;
    double lognc () const;
    vec mean() const {return mu;}
    vec variance() const {return diag ( R.to_mat() );}
    // mlnorm<sq_T>* condition ( const RV &rvn ) const;
    mpdf* condition ( const RV &rvn ) const;
    // enorm<sq_T>* marginal ( const RV &rv ) const;
    epdf* marginal ( const RV &rv ) const;
    //!@}

    //! \name Access to attributes
    //!@{

    vec& _mu() {return mu;}
    void set_mu ( const vec mu0 ) {mu = mu0;}
    sq_T& _R() {return R;}
    const sq_T& _R() const {return R;}
    //!@}

};

/*!
 * \brief Gauss-inverse-Wishart density stored in LD form

 * For \f$p\f$-variate densities, the given rv.count() should be \f$p \times\f$ V.rows().
 *
 */
class egiw : public eEF
{
protected:
    //! Extended information matrix of sufficient statistics
    ldmat V;
    //! Number of data records (degrees of freedom) of sufficient statistics
    double nu;
    //! Dimension of the output
    int dimx;
    //! Dimension of the regressor
    int nPsi;
public:
    //!\name Constructors
    //!@{
    egiw() : eEF() {};
    egiw ( int dimx0, ldmat V0, double nu0 = -1.0 ) : eEF() {set_parameters ( dimx0, V0, nu0 );};

    void set_parameters ( int dimx0, ldmat V0, double nu0 = -1.0 )
    {
        dimx = dimx0;
        nPsi = V0.rows() - dimx;
        dim = dimx * ( dimx + nPsi ); // size(R) + size(Theta)

        V = V0;
        if ( nu0 < 0 )
        {
            nu = 0.1 + nPsi + 2*dimx + 2; // +2 assures finite expected value of R
            // terms before that are sufficient for finite normalization
        }
        else
        {
            nu = nu0;
        }
    }
    //!@}

    vec sample() const;
    vec mean() const;
    vec variance() const;
    void mean_mat ( mat &M, mat &R ) const;
    //! In this instance, val = [theta, r]. For multivariate instances, it is stored columnwise: val = [theta_1 theta_2 ... r_1 r_2 ]
    double evallog_nn ( const vec &val ) const;
    double lognc () const;
    void pow ( double p ) {V *= p; nu *= p;};

    //! \name Access attributes
    //!@{

    ldmat& _V() {return V;}
    const ldmat& _V() const {return V;}
    double& _nu() {return nu;}
    const double& _nu() const {return nu;}
    //!@}
};
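
/* Example (illustrative sketch only, not part of the library): constructing a flat
 * egiw prior for a model with scalar output (dimx = 1) and a two-dimensional
 * regressor, so that V has dimension dimx + nPsi = 3. The numerical values and
 * variable names below are hypothetical; ldmat is assumed to be constructible from
 * a symmetric mat as elsewhere in this header.
 *
 *   ldmat V0 ( 1e-5 * eye ( 3 ) );   // weakly informative extended information matrix
 *   egiw prior;
 *   prior.set_parameters ( 1, V0 );  // nu0 < 0 => default nu = 0.1 + nPsi + 2*dimx + 2
 *   vec est = prior.mean();          // stored as [theta_1 theta_2 r]
 */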

/*! \brief Dirichlet posterior density

Continuous Dirichlet density of \f$n\f$-dimensional variable \f$x\f$
\f[
f(x|\beta) = \frac{\Gamma(\gamma)}{\prod_{i=1}^{n}\Gamma(\beta_i)} \prod_{i=1}^{n}x_i^{\beta_i-1}
\f]
where \f$\gamma=\sum_i \beta_i\f$.
*/
class eDirich : public eEF
{
protected:
    //! sufficient statistics
    vec beta;
    //! speedup variable
    double gamma;
public:
    //!\name Constructors
    //!@{

    eDirich () : eEF ( ) {};
    eDirich ( const eDirich &D0 ) : eEF () {set_parameters ( D0.beta );};
    eDirich ( const vec &beta0 ) {set_parameters ( beta0 );};
    void set_parameters ( const vec &beta0 )
    {
        beta = beta0;
        dim = beta.length();
        gamma = sum ( beta );
    }
    //!@}

    vec sample() const {it_error ( "Not implemented" ); return vec_1 ( 0.0 );};
    vec mean() const {return beta / gamma;};
    vec variance() const {return elem_mult ( beta, ( beta + 1 ) ) / ( gamma * ( gamma + 1 ) );}
    //! In this instance, val is ...
    double evallog_nn ( const vec &val ) const
    {
        double tmp; tmp = ( beta - 1 ) * log ( val );
        it_assert_debug ( std::isfinite ( tmp ), "Infinite value" );
        return tmp;
    };
    double lognc () const
    {
        double tmp;
        double gam = sum ( beta );
        double lgb = 0.0;
        for ( int i = 0; i < beta.length(); i++ ) {lgb += lgamma ( beta ( i ) );}
        tmp = lgb - lgamma ( gam );
        it_assert_debug ( std::isfinite ( tmp ), "Infinite value" );
        return tmp;
    };
    //! access function
    vec& _beta() {return beta;}
    //! Set internal parameters
};
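
/* Example (illustrative sketch only): a symmetric Dirichlet over three categories.
 * The numbers are hypothetical and serve only to show the interface declared above.
 *
 *   eDirich D ( vec ( "2.0 2.0 2.0" ) );            // beta = [2,2,2], gamma = 6
 *   vec m = D.mean();                               // [1/3, 1/3, 1/3]
 *   double l = D.evallog ( vec ( "0.2 0.3 0.5" ) ); // normalized log-density
 */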

//! \brief Estimator for Multinomial density
class multiBM : public BMEF
{
protected:
    //! Conjugate prior and posterior
    eDirich est;
    //! Reference inside est to the sufficient statistics
    vec &beta;
public:
    //! Default constructor
    multiBM ( ) : BMEF ( ), est ( ), beta ( est._beta() )
    {
        if ( beta.length() > 0 ) {last_lognc = est.lognc();}
        else {last_lognc = 0.0;}
    }
    //! Copy constructor
    multiBM ( const multiBM &B ) : BMEF ( B ), est ( B.est ), beta ( est._beta() ) {}
    //! Sets sufficient statistics to match those of the given mB0
    void set_statistics ( const BM* mB0 ) {const multiBM* mB = dynamic_cast<const multiBM*> ( mB0 ); beta = mB->beta;}
    void bayes ( const vec &dt )
    {
        if ( frg < 1.0 ) {beta *= frg; last_lognc = est.lognc();}
        beta += dt;
        if ( evalll ) {ll = est.lognc() - last_lognc;}
    }
    double logpred ( const vec &dt ) const
    {
        eDirich pred ( est );
        vec &beta = pred._beta();

        double lll;
        if ( frg < 1.0 )
            {beta *= frg; lll = pred.lognc();}
        else
            if ( evalll ) {lll = last_lognc;}
            else {lll = pred.lognc();}

        beta += dt;
        return pred.lognc() - lll;
    }
    void flatten ( const BMEF* B )
    {
        const multiBM* E = dynamic_cast<const multiBM*> ( B );
        // sum(beta) should be equal to sum(B.beta)
        const vec &Eb = E->beta; //const_cast<multiBM*> ( E )->_beta();
        beta *= ( sum ( Eb ) / sum ( beta ) );
        if ( evalll ) {last_lognc = est.lognc();}
    }
    const epdf& posterior() const {return est;};
    const eDirich* _e() const {return &est;};
    void set_parameters ( const vec &beta0 )
    {
        est.set_parameters ( beta0 );
        if ( evalll ) {last_lognc = est.lognc();}
    }
};
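
/* Usage sketch (illustrative only): Bayesian counting with exponential forgetting.
 * The prior, the observation vectors and the category layout are hypothetical.
 *
 *   multiBM est;
 *   est.set_parameters ( vec ( "1.0 1.0" ) );      // uniform Dirichlet prior over 2 categories
 *   est.bayes ( vec ( "1.0 0.0" ) );               // observe category 1 => beta = [2,1]
 *   double lp = est.logpred ( vec ( "0.0 1.0" ) ); // log-predictive probability of category 2
 */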

/*!
\brief Gamma posterior density

Multivariate Gamma density as product of independent univariate densities.
\f[
f(x|\alpha,\beta) = \prod f(x_i|\alpha_i,\beta_i)
\f]
*/

class egamma : public eEF
{
protected:
    //! Vector \f$\alpha\f$
    vec alpha;
    //! Vector \f$\beta\f$
    vec beta;
public:
    //! \name Constructors
    //!@{
    egamma ( ) : eEF ( ), alpha ( 0 ), beta ( 0 ) {};
    egamma ( const vec &a, const vec &b ) {set_parameters ( a, b );};
    void set_parameters ( const vec &a, const vec &b ) {alpha = a; beta = b; dim = alpha.length();};
    //!@}

    vec sample() const;
    //! TODO: is it used anywhere?
    // mat sample ( int N ) const;
    double evallog ( const vec &val ) const;
    double lognc () const;
    //! Returns pointers to alpha and beta. Potentially dangerous: use with care!
    vec& _alpha() {return alpha;}
    vec& _beta() {return beta;}
    vec mean() const {return elem_div ( alpha, beta );}
    vec variance() const {return elem_div ( alpha, elem_mult ( beta, beta ) ); }
};
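
/* Sketch (illustrative values): a product of two independent Gamma densities.
 *
 *   egamma G ( vec ( "2.0 3.0" ), vec ( "1.0 0.5" ) ); // alpha, beta
 *   vec m = G.mean();                                  // alpha/beta   = [2.0, 6.0]
 *   vec v = G.variance();                              // alpha/beta^2 = [2.0, 12.0]
 */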

/*!
\brief Inverse-Gamma posterior density

Multivariate inverse-Gamma density as product of independent univariate densities.
\f[
f(x|\alpha,\beta) = \prod f(x_i|\alpha_i,\beta_i)
\f]

Vector \f$\beta\f$ has a different meaning here (in fact it is 1/beta as used in the definition of iG).

Inverse Gamma can be converted to Gamma using \f[
x\sim iG(a,b) \Rightarrow 1/x\sim G(a,1/b)
\f]
This relation is used in sampling.
*/

class eigamma : public egamma
{
protected:
public:
    //! \name Constructors
    //! All constructors are inherited
    //!@{
    //!@}

    vec sample() const {return 1.0 / egamma::sample();};
    //! Returns pointers to alpha and beta. Potentially dangerous: use with care!
    vec mean() const {return elem_div ( beta, alpha - 1 );}
    vec variance() const {vec mea = mean(); return elem_div ( elem_mult ( mea, mea ), alpha - 2 );}
};
/*
//! Weighted mixture of epdfs with external owned components.
class emix : public epdf {
protected:
    int n;
    vec &w;
    Array<epdf*> Coms;
public:
    //! Default constructor
    emix ( const RV &rv, vec &w0): epdf(rv), n(w0.length()), w(w0), Coms(n) {};
    void set_parameters( int &i, double wi, epdf* ep){w(i)=wi;Coms(i)=ep;}
    vec mean(){vec pom; for(int i=0;i<n;i++){pom+=Coms(i)->mean()*w(i);} return pom;};
    vec sample() {it_error ( "Not implemented" );return 0;}
};
*/
template<class sq_T>
class mgnorm : public mEF
{
protected:
    //! Internal epdf that arises by conditioning on \c rvc
    enorm<sq_T> epdf;
    vec &mu;
    fnc* g;
public:
    //! default constructor
    mgnorm() : mu ( epdf._mu() ) {ep = &epdf;}
    //! set mean function
    void set_parameters ( fnc* g0, const sq_T &R0 ) {g = g0; epdf.set_parameters ( zeros ( g->dimension() ), R0 );}
    void condition ( const vec &cond ) {mu = g->eval ( cond );};
};

/*! (Approximate) Student t density with linear function of mean value

The internal epdf of this class is a Gaussian (enorm).
However, each conditioning tries to assure the best possible approximation by taking the zeta factor into account. See [] for a reference.

Perhaps a moment-matching technique?
*/
class mlstudent : public mlnorm<ldmat>
{
protected:
    ldmat Lambda;
    ldmat &_R;
    ldmat Re;
public:
    mlstudent ( ) : mlnorm<ldmat> (),
        Lambda (), _R ( epdf._R() ) {}
    void set_parameters ( const mat &A0, const vec &mu0, const ldmat &R0, const ldmat &Lambda0 )
    {
        it_assert_debug ( A0.rows() == mu0.length(), "" );
        it_assert_debug ( R0.rows() == A0.rows(), "" );

        epdf.set_parameters ( mu0, Lambda ); //
        A = A0;
        mu_const = mu0;
        Re = R0;
        Lambda = Lambda0;
    }
    void condition ( const vec &cond )
    {
        _mu = A*cond + mu_const;
        double zeta;
        //ugly hack!
        if ( ( cond.length() + 1 ) == Lambda.rows() )
        {
            zeta = Lambda.invqform ( concat ( cond, vec_1 ( 1.0 ) ) );
        }
        else
        {
            zeta = Lambda.invqform ( cond );
        }
        _R = Re;
        _R *= ( 1 + zeta ); // / ( nu ); << nu is in Re!!!!!!
    };

};
/*!
\brief Gamma random walk

Mean value, \f$\mu\f$, of this density is given by \c rvc .
Standard deviation of the random walk is proportional to one \f$k\f$-th of the mean.
This is achieved by setting \f$\alpha=k\f$ and \f$\beta=k/\mu\f$.

The standard deviation of the walk is then: \f$\mu/\sqrt{k}\f$.
*/
class mgamma : public mEF
{
protected:
    //! Internal epdf that arises by conditioning on \c rvc
    egamma epdf;
    //! Constant \f$k\f$
    double k;
    //! cache of epdf.beta
    vec &_beta;

public:
    //! Constructor
    mgamma ( ) : mEF ( ), epdf (), _beta ( epdf._beta() ) {ep = &epdf;};
    //! Set value of \c k
    void set_parameters ( double k, const vec &beta0 );
    void condition ( const vec &val ) {_beta = k / val;};
};
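
/* Usage sketch (illustrative): with alpha = k and beta = k/mu the conditioned Gamma
 * density has mean mu and standard deviation mu/sqrt(k). The value of k below is
 * purely illustrative.
 *
 *   mgamma rw;
 *   rw.set_parameters ( 100.0, vec ( "1.0" ) ); // k = 100
 *   rw.condition ( vec ( "2.0" ) );             // now E[x] = 2.0, sd(x) = 0.2
 */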

/*!
\brief Inverse-Gamma random walk

Mean value, \f$ \mu \f$, of this density is given by \c rvc .
Standard deviation of the random walk is proportional to one \f$ k \f$-th of the mean.
This is achieved by setting \f$ \alpha=\mu/k^2+2 \f$ and \f$ \beta=\mu(\alpha-1)\f$.

The standard deviation of the walk is then: \f$ \mu/\sqrt{k}\f$.
*/
class migamma : public mEF
{
protected:
    //! Internal epdf that arises by conditioning on \c rvc
    eigamma epdf;
    //! Constant \f$k\f$
    double k;
    //! cache of epdf.alpha
    vec &_alpha;
    //! cache of epdf.beta
    vec &_beta;

public:
    //! \name Constructors
    //!@{
    migamma ( ) : mEF (), epdf ( ), _alpha ( epdf._alpha() ), _beta ( epdf._beta() ) {ep = &epdf;};
    migamma ( const migamma &m ) : mEF (), epdf ( m.epdf ), _alpha ( epdf._alpha() ), _beta ( epdf._beta() ) {ep = &epdf;};
    //!@}

    //! Set value of \c k
    void set_parameters ( int len, double k0 )
    {
        k = k0;
        epdf.set_parameters ( ( 1.0 / ( k*k ) + 2.0 ) * ones ( len ) /*alpha*/, ones ( len ) /*beta*/ );
        dimc = dimension();
    };
    void condition ( const vec &val )
    {
        _beta = elem_mult ( val, ( _alpha - 1.0 ) );
    };
};
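
/* Usage sketch (illustrative): condition() sets beta = mu*(alpha-1), so the
 * inverse-Gamma mean beta/(alpha-1) equals the conditioning value mu. The
 * dimension and k0 below are illustrative.
 *
 *   migamma ivw;
 *   ivw.set_parameters ( 1, 0.1 );
 *   ivw.condition ( vec ( "2.0" ) ); // E[x] = 2.0
 */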

/*!
\brief Gamma random walk around a fixed point

Mean value, \f$\mu\f$, of this density is given by a geometric combination of \c rvc and a given fixed point, \f$p\f$. \f$l\f$ is the coefficient of the geometric combination
\f[ \mu = \mu_{t-1} ^{l} p^{1-l}\f]

Standard deviation of the random walk is proportional to one \f$k\f$-th of the mean.
This is achieved by setting \f$\alpha=k\f$ and \f$\beta=k/\mu\f$.

The standard deviation of the walk is then: \f$\mu/\sqrt{k}\f$.
*/
class mgamma_fix : public mgamma
{
protected:
    //! parameter l
    double l;
    //! reference vector
    vec refl;
public:
    //! Constructor
    mgamma_fix ( ) : mgamma ( ), refl () {};
    //! Set value of \c k
    void set_parameters ( double k0, vec ref0, double l0 )
    {
        mgamma::set_parameters ( k0, ref0 );
        refl = pow ( ref0, 1.0 - l0 ); l = l0;
        dimc = dimension();
    };

    void condition ( const vec &val ) {vec mean = elem_mult ( refl, pow ( val, l ) ); _beta = k / mean;};
};
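
/* Sketch (illustrative): with l = 0.5 and fixed point ref0 = 1.0, conditioning on
 * val = 4.0 gives mean = ref0^(1-l) * val^l = 2.0, i.e. the walk is pulled halfway
 * (on the log scale) towards the fixed point.
 *
 *   mgamma_fix gf;
 *   gf.set_parameters ( 100.0, vec ( "1.0" ), 0.5 );
 *   gf.condition ( vec ( "4.0" ) );
 */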


/*!
\brief Inverse-Gamma random walk around a fixed point

Mean value, \f$\mu\f$, of this density is given by a geometric combination of \c rvc and a given fixed point, \f$p\f$. \f$l\f$ is the coefficient of the geometric combination
\f[ \mu = \mu_{t-1} ^{l} p^{1-l}\f]

==== Check == vv =
Standard deviation of the random walk is proportional to one \f$k\f$-th of the mean.
This is achieved by setting \f$\alpha=k\f$ and \f$\beta=k/\mu\f$.

The standard deviation of the walk is then: \f$\mu/\sqrt{k}\f$.
*/
class migamma_ref : public migamma
{
protected:
    //! parameter l
    double l;
    //! reference vector
    vec refl;
public:
    //! Constructor
    migamma_ref ( ) : migamma (), refl ( ) {};
    //! Set value of \c k
    void set_parameters ( double k0, vec ref0, double l0 )
    {
        migamma::set_parameters ( ref0.length(), k0 );
        refl = pow ( ref0, 1.0 - l0 );
        l = l0;
        dimc = dimension();
    };

    void condition ( const vec &val )
    {
        vec mean = elem_mult ( refl, pow ( val, l ) );
        migamma::condition ( mean );
    };
};

/*! Log-Normal probability density
 only allows diagonal covariances!

Density of the form \f$ \log(x)\sim \mathcal{N}(\mu,\sigma^2) \f$, i.e.
\f[
x \sim \frac{1}{x\sigma\sqrt{2\pi}}\exp\left(-\frac{(\log(x)-\mu)^2}{2\sigma^2}\right)
\f]

*/
class elognorm : public enorm<ldmat>
{
public:
    vec sample() const {return exp ( enorm<ldmat>::sample() );};
    vec mean() const {vec var = enorm<ldmat>::variance(); return exp ( mu + 0.5*var );}; // E[x] = exp(mu + sigma^2/2) for a log-normal

};

/*!
\brief Log-Normal random walk

Mean value, \f$\mu\f$, is...

==== Check == vv =
Standard deviation of the random walk is proportional to one \f$k\f$-th of the mean.
This is achieved by setting \f$\alpha=k\f$ and \f$\beta=k/\mu\f$.

The standard deviation of the walk is then: \f$\mu/\sqrt{k}\f$.
*/
class mlognorm : public mpdf
{
protected:
    elognorm eno;
    //! parameter 1/2*sigma^2
    double sig2;
    //! access
    vec &mu;
public:
    //! Constructor
    mlognorm ( ) : eno (), mu ( eno._mu() ) {ep = &eno;};
    //! Set value of \c k
    void set_parameters ( int size, double k )
    {
        sig2 = 0.5*log ( k*k + 1 );
        eno.set_parameters ( zeros ( size ), 2*sig2*eye ( size ) );

        dimc = size;
    };

    void condition ( const vec &val )
    {
        mu = log ( val ) - sig2; //elem_mult ( refl,pow ( val,l ) );
    };
};
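
/* Usage sketch (illustrative): with sig2 = 0.5*log(k*k+1) the Gaussian variance of
 * log(x) is log(k*k+1), hence the relative standard deviation of x is exactly k,
 * and condition() centres the walk so that E[x] equals the conditioning value.
 * The size and k below are illustrative.
 *
 *   mlognorm lw;
 *   lw.set_parameters ( 1, 0.1 );   // scalar walk, ~10% relative noise
 *   lw.condition ( vec ( "5.0" ) ); // E[x] = 5.0
 */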

/*! Wishart density defined on the Cholesky decomposition

*/
class eWishartCh : public epdf
{
protected:
    //! Upper triangle of the Cholesky decomposition of \f$ \Psi \f$
    chmat Y;
    //! dimension of matrix \f$ \Psi \f$
    int p;
    //! degrees of freedom \f$ \nu \f$
    double delta;
public:
    void set_parameters ( const mat &Y0, const double delta0 ) {Y = chmat ( Y0 ); delta = delta0; p = Y.rows(); dim = p*p; }
    mat sample_mat() const
    {
        mat X = zeros ( p, p );

        //sample diagonal
        for ( int i = 0; i < p; i++ )
        {
            GamRNG.setup ( 0.5* ( delta - i ), 0.5 ); // no +1 !! index is from 0
#pragma omp critical
            X ( i, i ) = sqrt ( GamRNG() );
        }
        //do the rest
        for ( int i = 0; i < p; i++ )
        {
            for ( int j = i + 1; j < p; j++ )
            {
#pragma omp critical
                X ( i, j ) = NorRNG.sample();
            }
        }
        return X*Y._Ch(); // return upper triangular part of the decomposition
    }
    vec sample () const
    {
        return vec ( sample_mat()._data(), p*p );
    }
    //! fast access function; Ch0 will be copied into Y.Ch
    void setY ( const mat &Ch0 ) {copy_vector ( dim, Ch0._data(), Y._Ch()._data() );}
    //! fast access function; ch0 will be copied into Y.Ch
    void _setY ( const vec &ch0 ) {copy_vector ( dim, ch0._data(), Y._Ch()._data() ); }
    //! access function
    const chmat& getY() const {return Y;}
};

//! Inverse-Wishart density defined on the Cholesky decomposition
class eiWishartCh : public epdf
{
protected:
    eWishartCh W;
    int p;
    double delta;
public:
    void set_parameters ( const mat &Y0, const double delta0 )
    {
        delta = delta0;
        W.set_parameters ( inv ( Y0 ), delta0 );
        dim = W.dimension(); p = Y0.rows();
    }
    vec sample() const {mat iCh; iCh = inv ( W.sample_mat() ); return vec ( iCh._data(), dim );}
    void _setY ( const vec &y0 )
    {
        mat Ch ( p, p );
        mat iCh ( p, p );
        copy_vector ( dim, y0._data(), Ch._data() );

        iCh = inv ( Ch );
        W.setY ( iCh );
    }
    virtual double evallog ( const vec &val ) const
    {
        chmat X ( p );
        const chmat& Y = W.getY();

        copy_vector ( p*p, val._data(), X._Ch()._data() );
        chmat iX ( p ); X.inv ( iX );
        // compute
        // \frac{ |\Psi|^{m/2}|X|^{-(m+p+1)/2}e^{-tr(\Psi X^{-1})/2} }{ 2^{mp/2}\Gamma_p(m/2)},
        mat M = Y.to_mat() * iX.to_mat();

        double log1 = 0.5*p* ( 2*Y.logdet() ) - 0.5* ( delta + p + 1 ) * ( 2*X.logdet() ) - 0.5*trace ( M );
        //Fixme! Multivariate gamma omitted!! it is ok for sampling, but not otherwise!!

        /* if (0) {
            mat XX=X.to_mat();
            mat YY=Y.to_mat();

            double log2 = 0.5*p*log(det(YY))-0.5*(delta+p+1)*log(det(XX))-0.5*trace(YY*inv(XX));
            cout << log1 << "," << log2 << endl;
        }*/
        return log1;
    };
enum RESAMPLING_METHOD { MULTINOMIAL = 0, STRATIFIED = 1, SYSTEMATIC = 3 };
/*!
\brief Weighted empirical density

Used e.g. in particle filters.
*/
class eEmp : public epdf
{
protected:
    //! Number of particles
    int n;
    //! Sample weights \f$w\f$
    vec w;
    //! Samples \f$x^{(i)}, i=1..n\f$
    Array<vec> samples;
public:
    //! \name Constructors
    //!@{
    eEmp ( ) : epdf ( ), w ( ), samples ( ) {};
    eEmp ( const eEmp &e ) : epdf ( e ), w ( e.w ), samples ( e.samples ) {};
    //!@}

    //! Set samples and weights
    void set_statistics ( const vec &w0, const epdf* pdf0 );
    //! Set samples and weights
    void set_statistics ( const epdf* pdf0, int n ) {set_statistics ( ones ( n ) / n, pdf0 );};
    //! Set samples
    void set_samples ( const epdf* pdf0 );
    //! Set the number of particles
    void set_parameters ( int n0, bool copy = true ) {n = n0; w.set_size ( n0, copy ); samples.set_size ( n0, copy );};
    //! Potentially dangerous, use with care.
    vec& _w() {return w;};
    //! Potentially dangerous, use with care.
    const vec& _w() const {return w;};
    //! access function
    Array<vec>& _samples() {return samples;};
    //! access function
    const Array<vec>& _samples() const {return samples;};
    //! Function performs resampling, i.e. removal of low-weight samples and duplication of high-weight samples such that the new samples represent the same density.
    ivec resample ( RESAMPLING_METHOD method = SYSTEMATIC );
    //! inherited operation : NOT implemented
    vec sample() const {it_error ( "Not implemented" ); return 0;}
    //! inherited operation : NOT implemented
    double evallog ( const vec &val ) const {it_error ( "Not implemented" ); return 0.0;}
    vec mean() const
    {
        vec pom = zeros ( dim );
        for ( int i = 0; i < n; i++ ) {pom += samples ( i ) * w ( i );}
        return pom;
    }
    vec variance() const
    {
        vec pom = zeros ( dim );
        for ( int i = 0; i < n; i++ ) {pom += pow ( samples ( i ), 2 ) * w ( i );}
        return pom - pow ( mean(), 2 );
    }
    //! For this class, qbounds are the minimum and maximum values of the population!
    void qbounds ( vec &lb, vec &ub, double perc = 0.95 ) const
    {
        // lb is set to +inf so that it will be pushed down by the samples below
        lb.set_size ( dim );
        ub.set_size ( dim );
        lb = std::numeric_limits<double>::infinity();
        ub = -std::numeric_limits<double>::infinity();
        int j;
        for ( int i = 0; i < n; i++ )
        {
            for ( j = 0; j < dim; j++ )
            {
                if ( samples ( i ) ( j ) < lb ( j ) ) {lb ( j ) = samples ( i ) ( j );}
                if ( samples ( i ) ( j ) > ub ( j ) ) {ub ( j ) = samples ( i ) ( j );}
            }
        }
    }
};
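
/* Usage sketch (illustrative; pdf0 stands for any epdf-derived prior available in
 * the surrounding code):
 *
 *   eEmp emp;
 *   emp.set_parameters ( 100 );       // allocate 100 particles
 *   emp.set_statistics ( pdf0, 100 ); // equal weights 1/N, samples drawn from pdf0
 *   ivec ind = emp.resample();        // SYSTEMATIC resampling by default
 */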
}

template<class sq_T>
epdf* enorm<sq_T>::marginal ( const RV &rvn ) const
{
    it_assert_debug ( isnamed(), "rv description is not assigned" );
    ivec irvn = rvn.dataind ( rv );

    sq_T Rn ( R, irvn ); //select rows and columns of R

    enorm<sq_T>* tmp = new enorm<sq_T>;
    tmp->set_rv ( rvn );
    tmp->set_parameters ( mu ( irvn ), Rn );
    return tmp;
}

template<class sq_T>
mpdf* enorm<sq_T>::condition ( const RV &rvn ) const
{

    it_assert_debug ( isnamed(), "rvs are not assigned" );

    RV rvc = rv.subt ( rvn );
    it_assert_debug ( ( rvc._dsize() + rvn._dsize() == rv._dsize() ), "wrong rvn" );
    //Permutation vector of the new R
    ivec irvn = rvn.dataind ( rv );
    ivec irvc = rvc.dataind ( rv );
    ivec perm = concat ( irvn, irvc );
    sq_T Rn ( R, perm );

    //fixme - could this be done in general for all sq_T?
    mat S = Rn.to_mat();
    //fixme
    int n = rvn._dsize() - 1;
    int end = R.rows() - 1;
    mat S11 = S.get ( 0, n, 0, n );
    mat S12 = S.get ( 0, n, rvn._dsize(), end );
    mat S22 = S.get ( rvn._dsize(), end, rvn._dsize(), end );

    vec mu1 = mu ( irvn );
    vec mu2 = mu ( irvc );
    mat A = S12 * inv ( S22 );
    sq_T R_n ( S11 - A * S12.T() );

    mlnorm<sq_T>* tmp = new mlnorm<sq_T> ( );
    tmp->set_rv ( rvn ); tmp->set_rvc ( rvc );
    tmp->set_parameters ( A, mu1 - A*mu2, R_n );
    return tmp;
}
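
/* The conditioning above implements the standard Gaussian identity: for the
 * permuted joint density
 *   [x_n; x_c] ~ N( [mu1; mu2], [S11 S12; S12' S22] ),
 * the conditional density is
 *   x_n | x_c ~ N( mu1 + A*(x_c - mu2), S11 - A*S12' ),  with  A = S12*inv(S22),
 * which is exactly what set_parameters ( A, mu1 - A*mu2, R_n ) stores in the
 * returned mlnorm.
 */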