# Examples from slides9
# For STAT8810
# Fall, 2017
# M.T. Pratola

# Example of Predictive Distribution
source("dace.sim.r")

set.seed(88)
n=5
x1=runif(n)
l1=list(m1=abs(outer(x1,x1,"-")))
l.dez=list(l1=l1)
rho=c(0.0001)
s2=1
# sim.field defaults to Gaussian correlation
z=sim.field(l.dez,rho,s2)

# Now we assume that the z's are observed error-free:
y=z

# Let's write down the predictive distribution
# over a fine grid
ng=100
xg=seq(0,1,length=ng)
X=c(x1,xg)
l1=list(m1=abs(outer(X,X,"-")))
l.dez=list(l1=l1)
Rall=rhogeodacecormat(l.dez,rho)$R

# Extract the sub-matrices we need
Ryy=Rall[1:n,1:n]
Rgg=Rall[(n+1):(n+ng),(n+1):(n+ng)]
Rgy=Rall[(n+1):(n+ng),1:n]
Ryy.inv=chol2inv(chol(Ryy))

# Mean of conditional distribution:
m.cond=Rgy%*%Ryy.inv%*%y
# Covariance of conditional distribution:
E.cond=s2*(Rgg-Rgy%*%Ryy.inv%*%t(Rgy))

# Now the predictive distribution is N(m.cond,E.cond).
# Let's generate a realization!
L=t(chol(E.cond+diag(ng)*1e-5))  # small diagonal jitter for numerical stability
u=rnorm(ng)
z.cond=m.cond + L%*%u

# And make a plot
plot(x1,y,pch=20,col="red",cex=2,xlim=c(0,1),ylim=c(0,4))
lines(xg,m.cond,lwd=5,col="black")
lines(xg,m.cond-1.96*sqrt(diag(E.cond)),lwd=2,col="black")
lines(xg,m.cond+1.96*sqrt(diag(E.cond)),lwd=2,col="black")
lines(xg,z.cond,col="grey")

# Generate some more realizations
plot(x1,y,pch=20,col="red",cex=2,xlim=c(0,1),ylim=c(0,4))
for(i in 1:100){
  u=rnorm(ng)
  z.cond=m.cond + L%*%u
  lines(xg,z.cond,col="grey")
}
lines(xg,m.cond,lwd=5,col="black")
lines(xg,m.cond-1.96*sqrt(diag(E.cond)),lwd=2,col="black")
lines(xg,m.cond+1.96*sqrt(diag(E.cond)),lwd=2,col="black")


# Example: lambda known, sampling posterior using Gibbs
# Simple example with known precision, unknown mean.
set.seed(88)  # just to replicate this example
lambda=0.5
mu=3.5
n=5
y=rnorm(n,mean=mu,sd=sqrt(1/lambda))
hist(y)

N=1000
ybar=mean(y)
mu0=0
lambda0=0.5
draw.mu=rep(0,N)
# With lambda known, the full conditional for mu is the exact posterior,
# so the "Gibbs" draws below are iid samples from N(mu1,1/lambda1).
lambda1=n*lambda+lambda0
mu1=1/lambda1*(n*lambda*ybar+lambda0*mu0)
for(i in 2:N) {
  draw.mu[i]=rnorm(1,mean=mu1,sd=sqrt(1/lambda1))
}
plot(draw.mu,type='l',xlab="Iteration",ylab=expression(mu))

# Drop "burn-in"
draw.mu=draw.mu[(N/2):N]

# Plot the prior, posterior and the truth
x=seq(-10,10,length=100)
dens.prior=dnorm(x,mean=mu0,sd=sqrt(1/lambda0))  # lambda0 is a precision, so sd=sqrt(1/lambda0)
plot(x,dens.prior,type='l',col="grey",lwd=2,xlim=c(-10,10),ylim=c(0,1),xlab=expression(mu),ylab="Density")
lines(density(draw.mu),lwd=2,col="blue")
abline(v=mu,lty=2,lwd=2)
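# Sanity check (a sketch added here, not part of the original slides): since
# the posterior is available in closed form as N(mu1,1/lambda1), the Monte
# Carlo mean and sd of the retained draws should agree with mu1 and
# sqrt(1/lambda1).
cat("closed-form posterior mean:",mu1," MC estimate:",mean(draw.mu),"\n")
cat("closed-form posterior sd:  ",sqrt(1/lambda1)," MC estimate:",sd(draw.mu),"\n")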

# Example: lambda unknown, sampling posterior using Gibbs
# Simple example with unknown precision, unknown mean.
set.seed(88)  # just to replicate this example
lambda=0.5
mu=3.5
n=5
y=rnorm(n,mean=mu,sd=sqrt(1/lambda))
hist(y,xlim=c(0,7))

N=5000
ybar=mean(y)
mu0=0
lambda0=0.25
alpha=1
beta=1
draw.mu=rep(0,N)     # chain initialized at draw.mu[1]=draw.lambda[1]=0
draw.lambda=rep(0,N)
for(i in 2:N) {
  # Gibbs step for mu
  lambda1=n*draw.lambda[i-1]+lambda0
  mu1=1/lambda1*(n*draw.lambda[i-1]*ybar+lambda0*mu0)
  draw.mu[i]=rnorm(1,mean=mu1,sd=sqrt(1/lambda1))
  # Gibbs step for lambda
  alpha.n=alpha+n/2
  beta.n=beta+0.5*sum((y-draw.mu[i])^2)  # sum of squared deviations
  draw.lambda[i]=rgamma(1,shape=alpha.n,rate=beta.n)
}
par(mfrow=c(1,2))
plot(draw.mu,type='l',xlab="Iteration",ylab=expression(mu))
plot(draw.lambda,type='l',xlab="Iteration",ylab=expression(lambda))

# Drop "burn-in"
draw.mu=draw.mu[(N/2):N]
draw.lambda=draw.lambda[(N/2):N]

# Plot the prior, posterior and the truth
x=seq(-10,10,length=100)
dens.prior=dnorm(x,mean=mu0,sd=sqrt(1/lambda0))  # lambda0 is a precision, so sd=sqrt(1/lambda0)
plot(x,dens.prior,type='l',col="grey",lwd=2,xlim=c(-10,10),ylim=c(0,1),xlab=expression(mu),ylab="Density")
lines(density(draw.mu),lwd=2,col="blue")
abline(v=mu,lty=2,lwd=2)

x=seq(0,10,length=100)
dens.prior=dgamma(x,shape=alpha,rate=beta)
plot(x,dens.prior,type='l',col="grey",lwd=2,xlim=c(0,10),ylim=c(0,1),xlab=expression(lambda),ylab="Density")
lines(density(draw.lambda),lwd=2,col="blue")
abline(v=lambda,lty=2,lwd=2)
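
# Quick numeric summary (my addition, a sketch rather than part of the
# original slides): posterior means and equal-tailed 95% intervals for mu
# and lambda from the retained draws, to compare against the true values
# mu=3.5 and lambda=0.5 used to simulate the data.
cat("mu:     mean",mean(draw.mu)," 95% interval",
    quantile(draw.mu,c(0.025,0.975)),"\n")
cat("lambda: mean",mean(draw.lambda)," 95% interval",
    quantile(draw.lambda,c(0.025,0.975)),"\n")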