# Holling's disc data: consumption rate of a consumer as a function of
# resource density.
d <- read.csv("/home/aqm/course/data/Hollings.csv")
# Quick look at the raw relationship (filled red points).
plot(d, pch = 21, bg = 2)
library(rjags)
## Loading required package: coda
## Linked to JAGS 4.2.0
## Loaded modules: basemod,bugs
library(ggplot2)
library(dplyr)
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
# JAGS model: simple linear regression y = b0 + b1*x with normal errors.
# Vague normal priors on intercept (b0) and slope (b1); uniform prior on the
# residual SD. Note JAGS parameterises dnorm by precision (tau = 1/sd^2),
# not variance, hence the pow(sd, -2) transform.
reg_mod<-"
model{
#Likelihood
for( i in 1:n)
{
y[i]~dnorm(mu[i], tau)
mu[i]<-b0+b1*x[i]
}
#priors
b0~dnorm(0,.01)
b1~dnorm(0,.01)
tau<-pow(sd, -2)
sd~dunif(0,100)
}"
# Data list for JAGS: sample size, predictor (resource density) and
# response (consumption).
dat<-list(n=length(d$Resource),x=d$Resource,y=d$Consumption)
# Compile the linear regression model with 3 MCMC chains.
mod1 <- jags.model(textConnection(reg_mod), data = dat, n.chains = 3)
## Compiling model graph
## Resolving undeclared variables
## Allocating nodes
## Graph information:
## Observed stochastic nodes: 30
## Unobserved stochastic nodes: 3
## Total graph size: 133
##
## Initializing model
# Burn-in: discard the first 1000 iterations of each chain.
update(mod1, 1000)
# Draw 5000 posterior samples per chain for the intercept and slope.
mod1_sim <- coda.samples(model = mod1, variable.names = c("b0","b1"), n.iter = 5000)
# FIX: tidy_draws() is an exported tidybayes function, so access it with the
# public `::` operator rather than `:::` (internal-namespace access, which
# R CMD check flags and which is unnecessary here).
tidybayes::tidy_draws(mod1_sim)
## # A tibble: 15,000 x 5
## .chain .iteration .draw b0 b1
## <int> <int> <int> <dbl> <dbl>
## 1 1 1 1 14.1 0.0105
## 2 1 2 2 14.0 0.0105
## 3 1 3 3 14.0 0.00980
## 4 1 4 4 14.2 0.00981
## 5 1 5 5 13.8 0.0109
## 6 1 6 6 13.7 0.0110
## 7 1 7 7 13.7 0.0120
## 8 1 8 8 13.7 0.0119
## 9 1 9 9 13.5 0.0120
## 10 1 10 10 13.5 0.0118
## # ... with 14,990 more rows
# Posterior means, SDs and quantiles for the regression parameters.
summary(mod1_sim)
##
## Iterations = 2001:7000
## Thinning interval = 1
## Number of chains = 3
## Sample size per chain = 5000
##
## 1. Empirical mean and standard deviation for each variable,
## plus standard error of the mean:
##
## Mean SD Naive SE Time-series SE
## b0 13.80100 0.388201 3.170e-03 1.031e-02
## b1 0.01101 0.001202 9.815e-06 3.243e-05
##
## 2. Quantiles for each variable:
##
## 2.5% 25% 50% 75% 97.5%
## b0 13.013397 13.55092 13.81010 14.06138 14.54484
## b1 0.008697 0.01021 0.01098 0.01178 0.01342
# Frequentist cross-check: with these vague priors the lm() 95% confidence
# intervals closely match the 95% credible intervals above.
confint(lm(Consumption~Resource,data=d))
## 2.5 % 97.5 %
## (Intercept) 13.102428065 14.59200222
## Resource 0.008568358 0.01318626
# JAGS model: hyperbolic (Michaelis-Menten-type) saturating curve
# mu = b0*x/(b1 + x), where b0 is the asymptote and b1 the half-saturation
# value of x. Same vague priors and normal error structure as reg_mod.
hyp_mod<-"
model{
#Likelihood
for( i in 1:n)
{
y[i]~dnorm(mu[i], tau)
mu[i]<-b0*x[i]/(b1+x[i])
}
#priors
b0~dnorm(0,.01)
b1~dnorm(0,.01)
tau<-pow(sd, -2)
sd~dunif(0,100)
}"
# Compile the hyperbolic model; reuses the `dat` list built above for the
# Hollings data.
mod1 <- jags.model(textConnection(hyp_mod), data = dat, n.chains = 3)
## Compiling model graph
## Resolving undeclared variables
## Allocating nodes
## Graph information:
## Observed stochastic nodes: 30
## Unobserved stochastic nodes: 3
## Total graph size: 163
##
## Initializing model
# Burn-in.
update(mod1, 1000)
# Sample the asymptote (b0) and half-saturation constant (b1).
mod1_sim <- coda.samples(model = mod1, variable.names = c("b0","b1"), n.iter = 5000)
summary(mod1_sim)
##
## Iterations = 2001:7000
## Thinning interval = 1
## Number of chains = 3
## Sample size per chain = 5000
##
## 1. Empirical mean and standard deviation for each variable,
## plus standard error of the mean:
##
## Mean SD Naive SE Time-series SE
## b0 19.80 0.1893 0.001546 0.004954
## b1 36.46 2.4260 0.019808 0.065876
##
## 2. Quantiles for each variable:
##
## 2.5% 25% 50% 75% 97.5%
## b0 19.41 19.68 19.80 19.92 20.16
## b1 31.61 34.87 36.54 38.08 41.18
# Frequentist fit of the same curve (s <-> b0, F <-> b1) for comparison; the
# profile confidence intervals agree well with the credible intervals above.
# NOTE(review): `F` as a parameter name works inside the nls formula but
# shadows the built-in FALSE abbreviation -- a less confusing name (e.g. `k`)
# would be safer.
nlmod<-nls(Consumption~s*Resource/(F+Resource),data = d,start = list( F = 20,s=20))
confint(nlmod)
## Waiting for profiling to be done...
## 2.5% 97.5%
## F 33.97300 43.66697
## s 19.58576 20.32930
# Bunting feeding data: intake rate as a function of prey density.
# NOTE: this overwrites `d`, so anything below that indexes Hollings columns
# (Resource/Consumption) will silently get NULL.
d <- read.csv("/home/aqm/course/data/buntings.csv")
plot(rate ~ density, data = d, pch = 21, bg = 2)
# Trying to write the Bayesian model.
# Use gamma errors to avoid negative rates. Set gamma priors on both
# parameters. Use the re-parameterisation of the gamma in terms of mean and
# variance instead of shape and rate, for clarity.
# Prior-only model: sampling from it visualises the priors for a and H.
# dgamma(shape, rate) is moment-matched so that mean = shape/rate = *_mu and
# variance = shape/rate^2 = *_sig. So each *_sig value is a VARIANCE, not an
# SD (e.g. H_sig = 0.28727 is approximately 0.535^2, the squared spread seen
# in the samples below).
priors<-"
model{
#priors
a~dgamma( ((a_mu^2)/a_sig), (a_mu/a_sig))
H~dgamma( ((H_mu^2)/H_sig), (H_mu/H_sig))
sig~dunif(0.0001,1)
}"
# Prior means and variances for the attack rate (a) and handling time (H),
# passed in as data so the model string stays generic.
dat<-list(a_mu=0.00117,a_sig= 0.0000053,H_mu=2.64950,H_sig=0.28727)
# Compile and sample the prior-only model. There is no likelihood, hence the
# "Observed stochastic nodes: 0" in the output -- this is expected here.
mod1 <- jags.model(textConnection(priors), data = dat, n.chains = 5)
## Compiling model graph
## Resolving undeclared variables
## Allocating nodes
## Graph information:
## Observed stochastic nodes: 0
## Unobserved stochastic nodes: 3
## Total graph size: 17
##
## Initializing model
# Long burn-in and sample purely to get smooth prior density plots.
update(mod1, 10000)
priors_mod <- coda.samples(model = mod1, variable.names = c("a","H"), n.iter = 50000)
# Trace and density plots of the priors.
plot(priors_mod)
# Holling type II disc equation with gamma errors (keeps rates strictly
# positive): mu = a*x / (1 + a*x*H), where a is the attack rate and H the
# handling time. The gamma likelihood is moment-matched the same way as the
# priors, so sig is the variance of y around mu.
hol_mod<-"
model{
#Likelihood
for( i in 1:n)
{
y[i] ~dgamma( ((mu[i]^2)/sig), (mu[i]/sig))
mu[i]<- a*x[i]/(1+a*x[i]*H)
}
#priors
a~dgamma( ((a_mu^2)/a_sig), (a_mu/a_sig))
H~dgamma( ((H_mu^2)/H_sig), (H_mu/H_sig))
sig~dunif(0.0001,1)
}"
# The model still does not really work, even with informative priors derived
# from the quantile regression.
# Data list for fitting the disc equation to the bunting data.
# BUG FIX: the original used n=length(d$Resource), but `d` is now the
# buntings data (columns `density` and `rate`), so d$Resource is NULL and
# n was 0. The likelihood loop therefore never ran -- see "Observed
# stochastic nodes: 0" in the recorded output below -- and the "posterior"
# simply reproduced the priors. Use nrow(d) for the sample size instead.
dat<-list(n=nrow(d),x=d$density,y=d$rate,a_mu=0.00117,a_sig= 0.0000053,H_mu=2.64950,H_sig=0.28727)
mod1 <- jags.model(textConnection(hol_mod), data = dat, n.chains = 5)
# (Output below was recorded from the buggy n = 0 run; with the fix the
# observed-node count should equal nrow(d).)
## Compiling model graph
## Resolving undeclared variables
## Allocating nodes
## Graph information:
## Observed stochastic nodes: 0
## Unobserved stochastic nodes: 3
## Total graph size: 310
##
## Initializing model
# Burn-in.
update(mod1, 10000)
# Sample attack rate (a) and handling time (H).
mod1_sim <- coda.samples(model = mod1, variable.names = c("a","H"), n.iter = 50000)
# Posterior summary. Note the "posterior" reproduces the priors almost
# exactly (H: mean 2.6497 vs H_mu 2.64950; a: mean 0.001172 vs a_mu 0.00117),
# which shows the data never entered the likelihood -- the data list above
# produced n = 0 observations.
summary(mod1_sim)
##
## Iterations = 10001:60000
## Thinning interval = 1
## Number of chains = 5
## Sample size per chain = 50000
##
## 1. Empirical mean and standard deviation for each variable,
## plus standard error of the mean:
##
## Mean SD Naive SE Time-series SE
## H 2.649695 0.535332 1.071e-03 1.073e-03
## a 0.001172 0.002294 4.588e-06 4.595e-06
##
## 2. Quantiles for each variable:
##
## 2.5% 25% 50% 75% 97.5%
## H 1.708e+00 2.271e+00 2.6136986 2.989492 3.800001
## a 1.820e-09 1.437e-05 0.0002204 0.001243 0.007946
plot(mod1_sim)
# Frequentist fit of the same disc equation for comparison. Its interval for
# a (0.0026-0.0067) sits well away from the prior mean a_mu = 0.00117 used
# above -- further evidence the Bayesian run was effectively prior-only.
HDmod<-nls(rate~a*density/(1+a*density*H),data = d,start = list(a =0.001,H=2))
confint(HDmod)
## Waiting for profiling to be done...
## 2.5% 97.5%
## a 0.002593086 0.006694939
## H 1.713495694 1.976978655