In this vignette, we recreate the figures from the paper by Asger Hobolth, Mogens Bladt and Lars Nørvang Andersen entitled “Multivariate phase-type theory for the site frequency spectrum” (https://arxiv.org/abs/2101.04941). This vignette is only intended to illustrate how to reproduce the figures of the article. If you are looking for a more in-depth explanation on how to use the package for population genetics, please visit this vignette instead.
For reproducibility of the simulations below, we first fix the seed of the random generator. We then load the package and auxiliary functions, which we will need below. The auxiliary functions are included at the end of this vignette, and they can be copy-pasted into a source file if needed.
set.seed(0)
library(PhaseTypeR)
library(expm)
# source('auxiliary_functions.R')
Consider first the left plot of Figure 1. This figure displays the coefficients of the site frequency spectrum for different unbiased estimators of the mutation rate \(\theta\). The coefficients of the H, L, Watterson's, pairwise and singleton estimators are given in Section 1.1 of Hobolth (2021). The coefficients of the BLUE estimator are given by \[\begin{eqnarray} \hat{\mathbf{c}}= \frac{\mathbf{\Lambda}^{-1}\mathbf{v}}{\mathbf{v}^{\ast}\;\mathbf{\Lambda}^{-1}\mathbf{v}}, \end{eqnarray}\] where \(\mathbf{v}= (1,1/2,\dots,1/(n-1))^\ast\) and \(\mathbf{\Lambda}\) is the covariance matrix of the site frequency spectrum, which, as described in Section 5 of Hobolth (2021), can be calculated in terms of the mean \(\mathbf{\mu}\) and covariance matrix \(\mathbf{\Sigma}\) of the underlying block counting process: \[\begin{align} \mathbf{\Lambda}= \mbox{Var}\mathbf{Z}= \left(\frac{\theta}{2}\right)^2 \mathbf{\Sigma}+ \frac{\theta}{2} \mbox{diag}(\mathbf{\mu}) \end{align}\]
The latter two are easily calculated numerically using the package.
n = 10
# Generate the block counting process for n = 10 as a
# phase-type distribution. The resulting object will be of class
# "mult_cont_phase_type" with subintensity matrix "subint_mat"
# and state space/reward matrix "reward_mat".
ph_bcp <- block_counting_process(n)

bmu    <- mean(ph_bcp) # bmu = bold mu
bSigma <- var(ph_bcp)  # bSigma = bold Sigma
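As a quick sanity check (an illustrative addition, not part of the original figure code), the mean vector of the block counting process should match the classical coalescent expectation that the \(i\)-ton branch length has mean \(2/i\):

# The entries of bmu should equal 2/i for i = 1,...,n-1
round(bmu, 3)
round(2/(1:(n-1)), 3)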
Using these two quantities, the coefficients are calculated below.
thetaVec <- c(0.1,1,5,10,100) # Vector of theta values for Figure 1
bv <- 1/(1:(n-1))             # The vector bold v defined above

## BLUE Estimators
coef_matrix <- matrix(0,length(thetaVec),n-1)
for(i in 1:length(thetaVec)) {
  theta <- thetaVec[i]
  bLambda <- (theta/2)^2*bSigma + (theta/2)*diag( bmu )
  coef_matrix[i,] <- solve(bLambda)%*%bv/c(bv%*%solve(bLambda)%*%bv)
}
##-------------------------------------------------------------
## Watterson's estimator
xWatt <- rep(1,length(bv))/sum(bv)
##-------------------------------------------------------------
## Singleton estimator
xsngltns <- c(1,rep(0,(length(bv)-1)))
##-------------------------------------------------------------
## Pairwise difference estimator
xpair <- ( 1:(n-1) )*( (n-1):1 )/n/(n-1)*2
##-------------------------------------------------------------
## H estimator
xH <- ( 1:(n-1) )^2 *2/n/(n-1)
##-------------------------------------------------------------
## L estimator
xL <- ( 1:(n-1) )/(n-1)
##---------------------------------------------------------------
## Plot the coefficients of the 5 different estimators (W,S,P,H,L)
plot(1:(n-1),xWatt,ylim=c(-0.5,2),col="black",lwd=2,type="l",xlim=c(1,n),
     xlab=bquote(i),ylab=bquote(c[i]),cex.lab=1.5,lty=2,las=1,cex.axis=1.5)
abline(v=1:9,col="gray")
points(1:(n-1),xsngltns,col="black",type="l",lty=3,lwd=2)
points(1:(n-1),xpair,col="black",type="l",lty=4,lwd=2)
points(1:(n-1),xH,col="black",type="l",lty=5,lwd=2)
points(1:(n-1),xL,col="black",type="l",lty=6,lwd=2)
for (i in 1:length(thetaVec)){
  xhat <- coef_matrix[i,]
  points(1:(n-1),xhat,type="l",lwd=2)
  text(n-1,xhat[n-1],bquote(theta==.(thetaVec[i])),pos=4,cex=1.0)
}
txtVec <- c("Watterson","Singleton","Pairwise","H","L","BLUE")
ltyVec <- c(2,3,4,5,6,1)
indx <- c(4,5,1,3,2,6)
legend(2,2,txtVec[indx],lty=ltyVec[indx],lwd=2,bty="n",cex=1.2)
The right plot in Figure 1 displays the variances of the estimators as functions of \(\theta\); these can be calculated as \(\mathbf{c}^\ast \mathbf{\Lambda}\mathbf{c}= \mathbf{c}^\ast \mathbf{\Lambda}(\theta) \mathbf{c}\), where \(\mathbf{c}\) are the coefficients calculated above and \(\mathbf{\Lambda}(\theta)\) is the covariance matrix of the site frequency spectrum as above.
thetaVec <- seq(0.01,2.5,by=0.1)
ntheta <- length(thetaVec)
vrW <- rep(0,ntheta) ; vrS <- rep(0,ntheta) ; vrP <- rep(0,ntheta)
vrH <- rep(0,ntheta) ; vrL <- rep(0,ntheta) ; vrMVUE <- rep(0,ntheta)
for (i in 1:ntheta){
  tht <- thetaVec[i]
  bLambda <- (tht^2/4)*bSigma + (tht/2)*diag( bmu )
  xhat <- (solve(bLambda) %*% bv)/as.numeric(bv %*% solve(bLambda) %*% bv)
  vrMVUE[i] <- t(xhat) %*% bLambda %*% xhat
  vrW[i] <- t(xWatt) %*% bLambda %*% xWatt
  vrS[i] <- t(xsngltns) %*% bLambda %*% xsngltns
  vrP[i] <- t(xpair) %*% bLambda %*% xpair
  vrH[i] <- t(xH) %*% bLambda %*% xH
  vrL[i] <- t(xL) %*% bLambda %*% xL
}
plot(thetaVec,vrMVUE,type="l",lty=1,lwd=2,xlim=c(0,max(thetaVec)),
     xlab=bquote(theta),ylab="Variance for estimator",cex.lab=1.5,las=1,cex.axis=1.5)
points(thetaVec,vrW,type="l",lty=2,lwd=2)
points(thetaVec,vrS,type="l",lty=3,lwd=2)
points(thetaVec,vrP,type="l",lty=4,lwd=2)
points(thetaVec,vrH,type="l",lty=5,lwd=2)
points(thetaVec,vrL,type="l",lty=6,lwd=2)
indx <- c(4,5,2,3,1,6)
legend("topleft",txtVec[indx],lty=ltyVec[indx],lwd=2,bty="n",cex=1.2)
Here, we numerically invert the characteristic function of the numerator of Tajima's \(D\) and compare it to simulated values. The simulation relies on Theorem 3.1 in Hobolth (2021), from which it follows that the site frequency spectrum can be simulated by first sampling a random variate from the appropriate block counting process and then letting the entries of this variate be the rates of independent Poisson random variables. First, we obtain the rate matrix and state space of the block counting process for \(n=4\):
n <- 4
# create rate-matrix and state space for block counting process
ph_bcp <- block_counting_process(n)

# Obtain sub-intensity matrix and reward matrix
subintensity_matrix <- ph_bcp$subint_mat
rew_mat <- ph_bcp$reward_mat
Next, we simulate random site frequency spectra.
R <- 1e4
ph_mv_sim_obj <- t(rMPH(R,ph_bcp))
lambda <- 0.5 # lambda = theta/2
ph_counts <- matrix(0,dim(ph_mv_sim_obj)[2],dim(ph_mv_sim_obj)[1])
for(i in 1:R) {
  ph_counts[i,] <- rpois(n-1,lambda*ph_mv_sim_obj[,i])
}
The numerator of Tajima's \(D\) is \(\hat{\theta}_{\pi}-\hat{\theta}_{\rm W}\), so the coefficient vector \(\mathbf{c}= (c_i)\) is given by
\[ c_i = \frac{1}{\binom{n}{2}} i (n-i) - \frac{1}{\sum_{j=1}^{n-1} \frac{1}{j}} \, , \]
which is implemented below:
bc <- 2*((1:(n-1))*( (n-1):1))/(n*(n-1)) - 1/sum(1/(1:(n-1)))
For numerical reasons, we scale the coefficients by 1000 and rescale later:
res <- 1000
bc <- res*bc
According to equations (48) and (49) of Hobolth (2021), the characteristic function is obtained as
\[ \phi(t) = G(\mathrm{e}^{\mathrm{i}t}), \] where \[\begin{align*} G(z) = \mathrm{E}\big[z^{\mathbf{c}^\ast \boldsymbol{\xi}}\big] = \mathrm{E}\big[z^{c_1 \xi_1} z^{c_2 \xi_2} \cdots z^{c_{n-1} \xi_{n-1}}\big] = \mathbf{e}_1^\ast \left( \lambda \Delta[\mathbf{A}(z^{\mathbf{c}}- \mathbf{e})] +\mathbf{T}\right)^{-1} \mathbf{T}\mathbf{e} = \mathbf{e}_1^\ast \left( \mathbf{S}+ \lambda \Delta[\mathbf{A}z^{\mathbf{c}}] \right)^{-1} \mathbf{T}\mathbf{e}, \end{align*}\] with \(\mathbf{T}\) and \(\mathbf{A}\) being, respectively, the sub-intensity matrix and the reward matrix of the block counting process, \(\mathbf{e}= (1,1,\dots,1)^\ast\) and \(\mathbf{S}= \mathbf{T}- \lambda \Delta[\mathbf{A}\mathbf{e}]\).
The next snippet implements the \(\phi\)-function
themean <- sum(mean(ph_bcp)*bc)     # the mean of the linear combination (should be 0)
bT <- subintensity_matrix           # bold T
bA <- rew_mat                       # bold A
bS <- bT - lambda*diag(rowSums(bA)) # bold S
be <- matrix(1,dim(bT)[1],1)        # be = (1,1,...,1)^T
beone <- matrix(0,1,dim(bT)[1]); beone[1] <- 1 # (1,0,...,0)
phi <- function(t) (exp(-1i*themean*t))*beone%*%solve(bS+lambda*diag(c(bA%*%(exp(1i*t)^bc))) )%*%bT%*%be
Finally, we approximate the CDF and rescale:
appvals <- ApproxCDF(phi,H = 1e5,eta=0.0001,xlim=c(-0.5*res,1.5*res),smoothe=TRUE)

xvals <- appvals[[1]]
yvals <- appvals[[2]]

# rescale
bc2 <- (1/res)*bc
xvals2 <- (1/res)*xvals
themean2 <- (1/res)*themean
centered_sim2 <- ph_counts%*%bc2-c(themean2)
ecdfobj2 <- ecdf(centered_sim2)

plot(xvals2,yvals,type="l",ylim=c(0,1),xlim=c(-1,1.5),xlab="",las=1,ylab="Cumulative distribution function",cex.axis=1.5,cex.lab=1.5)
lines(ecdfobj2,col="blue",col.01line = NULL)
lines(xvals2,yvals,lwd=2)
segments(-1.25, 0.025, x1 = xvals2[min(which(0.025<yvals))], y1 = 0.025,lty = 2)
segments(xvals2[min(which(0.025<yvals))], -1, x1 = xvals2[min(which(0.025<yvals))], y1 = 0.025,lty = 2)
segments(-1.25, 0.975, x1 = xvals2[max(which(0.975>yvals))], y1 = 0.975,lty = 2)
segments(xvals2[min(which(0.975<yvals))], -1, x1 = xvals2[max(which(0.975>yvals))], y1 = 0.975,lty = 2)
segments(xvals2[min(which(0.025<yvals))], 0, x1 = xvals2[max(which(0.975>yvals))], y1 = 0,lwd = 3)
points(xvals2[min(which(0.025<yvals))],-0.035,pch=17)
points(xvals2[max(which(0.975>yvals))],-0.035,pch=17)
Next, we run the above code with \(n=8\).
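A minimal sketch of the \(n=8\) case is given here (an illustrative addition that simply repeats the steps above with the new sample size; the simulation, rescaling and plotting steps are identical to the \(n=4\) case and are omitted):

n <- 8
ph_bcp <- block_counting_process(n)
lambda <- 0.5                        # theta/2 with theta = 1
bc <- 2*((1:(n-1))*((n-1):1))/(n*(n-1)) - 1/sum(1/(1:(n-1)))
res <- 1000
bc <- res*bc
themean <- sum(mean(ph_bcp)*bc)      # mean of the linear combination (again 0)
bT <- ph_bcp$subint_mat              # bold T
bA <- ph_bcp$reward_mat              # bold A
bS <- bT - lambda*diag(rowSums(bA))  # bold S
be <- matrix(1,dim(bT)[1],1)
beone <- matrix(0,1,dim(bT)[1]); beone[1] <- 1
phi <- function(t) exp(-1i*themean*t)*beone%*%solve(bS+lambda*diag(c(bA%*%(exp(1i*t)^bc))))%*%bT%*%be
appvals <- ApproxCDF(phi,H = 1e5,eta=0.0001,xlim=c(-0.5*res,1.5*res),smoothe=TRUE)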
In this section we recreate the bottom two sub-figures of Figure 4. We first consider the bottom-left figure, which displays the CDFs of the \(i\)-ton branch lengths for \(i = 1,2,3,4\). As described in Section 3, these branch lengths all have (possibly defective) \(PH\) distributions, whose sub-intensity matrices (and possible defects) can be calculated from the sub-intensity matrix and state space of the block counting process, so we first compute these two objects:
n <- 5

ph_bcp <- block_counting_process(n)

subintensity_matrix <- ph_bcp$subint_mat
rew_mat <- ph_bcp$reward_mat
The reward matrix (which also represents the state-space of the block-counting process) is
rew_mat
##      [,1] [,2] [,3] [,4]
## [1,]    5    0    0    0
## [2,]    3    1    0    0
## [3,]    2    0    1    0
## [4,]    1    2    0    0
## [5,]    1    0    0    1
## [6,]    0    1    1    0
As we see, the first column of the state-space matrix, \(\mathbf{r}= (5,3,2,1,1,0)^\ast\), corresponds to the rewards that give rise to the singleton branch length, which can then be generated as
ph = PH(subintensity_matrix)
ph_rew_obj <- reward_phase_type(ph, rew_mat[,1])
The code below generates the phase-type distribution of the \(i\)-ton branch lengths and their CDFs using the standard result for phase-type distributions (see e.g. Section 2.2 in Hobolth (2021)),
\[ F(t) = \mbox{Prob}(\tau \le t) = 1- \mathbf{e}_1^\ast e^{\mathbf{S}t}\mathbf{e}, \] and plots the four distribution functions:
plot(1, type="n",xlim=c(0,4),ylim=c(0,1),
xlab="t",ylab="Cumulative distribution function",cex.lab=1.5,
main=bquote("Branch length distributions for i-tons with sample size"~n==.(n)),las=1,cex.axis=1.5,cex.main=1.4)
for(i in 1:(n-1)) {
<- reward_phase_type(ph,rew_mat[,i])
ph_rew_obj <- matrix(1,length(ph_rew_obj$init_probs),1)
be <- function(u) {
abstime 1 - ph_rew_obj$init_probs%*%expm(ph_rew_obj$subint_mat*u)%*%be
}<- Vectorize(abstime)
abstime curve(abstime,lty=i,add=TRUE,lwd=2)
}
legend("bottomright",c("singleton","doubleton","tripleton","quardrupleton"),
lty=1:4,bty="n",cex=1.6,lwd=2)
In the bottom-right plot of Figure 4, we find the point probabilities of the number of mutations on the \(i\)-ton branches, \(i=1,\dots,4\), corresponding to the phase-type distributions of the bottom-left figure. These are easily computed using Theorem 2.3, which in this case has the interpretation that adding mutations to branches whose length is \(PH\)-distributed gives a \(DPH\) distribution, whose sub-transition matrix is given by
\[\begin{equation} \mathbf{M}=\Big(\mathbf{I}-\frac{2}{\theta} \mathbf{S}\Big)^{-1}. \tag{$\spadesuit$} \end{equation}\]
where \(\mathbf{S}\) is the sub-intensity matrix of the underlying (continuous) phase-type distribution.
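In the loop below, the point probabilities are read off with the standard \(DPH\) formula
\[ \mbox{Prob}(N = k) = \boldsymbol{\pi}\, \mathbf{M}^{k} \mathbf{m}, \qquad \mathbf{m} = (\mathbf{I}-\mathbf{M})\mathbf{e}, \]
which is what the lines computing bm and bpi%*%(bM%^%i)%*%bm implement, with the possible defect added to the probability at \(k=0\).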
plot(1, type="n", xlim=c(0, 4), ylim=c(0, 1),las=1,xlab="Number of mutations",ylab="Probability",
main=bquote("Probability of mutations for"~theta==1),cex.lab=1.5,cex.main=1.4,cex.axis=1.5)
for(i in 1:(n-1)) {
=reward_phase_type(ph, rew_mat[,i])
ph_rew_obj<- ph_rew_obj$subint_mat
bS <- solve(diag(dim(bS)[1])-2*bS)
bM <- ph_rew_obj$init_probs
bpi <- matrix(1,diag(dim(bS)[1],1))
be <- be - bM%*%be
bm <- apply(matrix(0:5),1,function(i) bpi%*%(bM%^%i)%*%bm)
probs 1] <- probs[1] + ph_rew_obj$defect #note we have to take the possible defect into account
probs[points(0:5,probs,pch=16)
lines(0:5,probs,lty=i,lwd=2)
}legend("topright",c("singleton","doubleton","tripleton","quardrupleton"),
lty=1:4,bty="n",cex=1.5,lwd=2)
Figure 6 displays the point probabilities of the three integer-weighted estimators of \(\theta\), namely \(\hat{\theta}_{\pi}\), \(\hat{\theta}_H\) and \(\hat{\theta}_L\), for \(n=4\) and \(n=6\). First, we consider the left sub-figure, i.e. \(n=4\). The three estimators are all integer-weighted versions of the site frequency spectrum, i.e. of the form \(\mathbf{a}^\ast \boldsymbol{\xi}= a_1 \xi_1 + a_2 \xi_2 + \dots + a_{n-1} \xi_{n-1}\), where the \(a_i\)s are integers. As described in Section 4 of Hobolth (2021), these estimators have a \(DPH\) distribution, whose sub-transition matrix is constructed using the entries of (\(\spadesuit\)), so we first calculate the latter:
n <- 4
theta <- 1
ph_bcp <- block_counting_process(n)

subintensity_matrix <- ph_bcp$subint_mat
rew_mat <- ph_bcp$reward_mat

ph = PH(subintensity_matrix)

# The reward vector is the row sums of the state space matrix
ph_rew_obj <- reward_phase_type(ph, rowSums(rew_mat))
bS <- ph_rew_obj$subint_mat

bM <- solve(diag(dim(bS)[1])-(2/theta)*bS)
Next, we define the three \(\mathbf{a}\) vectors of coefficients. These are \((1,2,3)\), \((3,4,3)\) and \((1,4,9)\) for \(\hat{\theta}_L\), \(\hat{\theta}_{\pi}\) and \(\hat{\theta}_{H}\), respectively.
baMat <- matrix(c(1:(n-1),(1:(n-1))*((n-1):1),(1:(n-1))^2),n-1,n-1,byrow=TRUE)
baMat
##      [,1] [,2] [,3]
## [1,]    1    2    3
## [2,]    3    4    3
## [3,]    1    4    9
The construction of the DPH representation is described in Section 4 of Hobolth (2021) and is implemented in the function DPHrep, which is invoked in the code snippet below.
len <- n*(n-1)+1
probsMat <- matrix(0,3,len)

for(i_outer in 1:3) {
  ba <- baMat[i_outer,] # This is the current a-vector
  DPH_obj <- DPHrep(bM,ph_bcp$reward_mat,ba)
  bMt <- DPH_obj[[1]]
  sizes_of_blocks <- DPH_obj[[2]]
  beone <- rep(0,dim(bMt)[1])
  beone[sizes_of_blocks[1]] <- 1
  be <- matrix(rep(1,dim(bMt)[1]))
  bmt <- be - bMt%*%be
  probs <- rep(0,len)
  for(i in 1:len) {
    probs[i] <- beone%*%(bMt%^%(i-1))%*%bmt
  }
  probsMat[i_outer,] <- probs
}
# Finally, plot the figures
main.txt <- bquote("Integer-weighted SFS distributions for"~n == .(n))

xs <- c(0,(1:(2*(n-1)))/(n-1))
plot(xs,probsMat[1,1:(2*(n-1)+1)],type="l",lty="dashed",pch=19,ylim=c(0,probsMat[1,1]),xlab="Weighted SFS",ylab="Probability",
     main=main.txt,cex.main=1.4,cex.lab=1.5,las=1,lwd=2,cex.axis=1.20)
points(xs,probsMat[1,1:(2*(n-1)+1)],type="p",pch=19)
xs <- c(0,(1:((n*(n-1))))/(n*(n-1)/2))
points(xs,probsMat[2,],type="l",lty="dotted",lwd=2)
points(xs,probsMat[2,],type="p",pch=19)
points(xs,probsMat[3,],type="l",lty="solid",lwd=2)
points(xs,probsMat[3,],type="p",pch=19)
abline(v=1,col="gray")
txt <- bquote("mean"~theta==1)
text(1,probsMat[1,1],txt,pos=2,cex=1.5)
legend("topright",c("Pairwise","H","L"),lwd=2,
       lty=c("dotted","solid","dashed"),bty="n",cex=1.5)
The figure for \(n=6\) is created in a similar fashion:
n <- 6
theta <- 1
ph_bcp <- block_counting_process(n)

subintensity_matrix <- ph_bcp$subint_mat
rew_mat <- ph_bcp$reward_mat

ph <- PH(subintensity_matrix)

# The reward vector is the row sums of the state space matrix
ph_rew_obj <- reward_phase_type(ph, rowSums(rew_mat))
bS <- ph_rew_obj$subint_mat

bM <- solve(diag(dim(bS)[1])-(2/theta)*bS)

baMat <- matrix(c(1:(n-1),(1:(n-1))*((n-1):1),(1:(n-1))^2),n-3,n-1,byrow=TRUE)

len <- n*(n-1)+1
probsMat <- matrix(0,3,len)

for(i_outer in 1:3) {
  ba <- baMat[i_outer,] # This is the current a-vector
  DPH_obj <- DPHrep(bM,ph_bcp$reward_mat,ba)
  bMt <- DPH_obj[[1]]
  sizes_of_blocks <- DPH_obj[[2]]
  beone <- rep(0,dim(bMt)[1])
  beone[sizes_of_blocks[1]] <- 1
  be <- matrix(rep(1,dim(bMt)[1]))
  bmt <- be - bMt%*%be
  probs <- rep(0,len)
  for(i in 1:len) {
    probs[i] <- beone%*%(bMt%^%(i-1))%*%bmt
  }
  probsMat[i_outer,] <- probs
}

xs <- c(0,(1:(2*(n-1)))/(n-1))

main.txt <- bquote("Integer-weighted SFS distributions for"~n==.(n))

plot(xs,probsMat[1,1:(2*(n-1)+1)],type="l",lty="dashed",pch=19,ylim=c(0,probsMat[1,1]),xlab="Weighted SFS",ylab="Probability",
     main=main.txt,cex.main=1.4,cex.lab=1.5,las=1,lwd=2,cex.axis=1.20)
points(xs,probsMat[1,1:(2*(n-1)+1)],type="p",pch=19)
xs <- c(0,(1:((n*(n-1))))/(n*(n-1)/2))
points(xs,probsMat[2,],type="l",lty="dotted",lwd=2)
points(xs,probsMat[2,],type="p",pch=19)
points(xs,probsMat[3,],type="l",lty="solid",lwd=2)
points(xs,probsMat[3,],type="p",pch=19)
xs <- c(0,(1:(2*(n-1)))/(n-1))

abline(v=1,col="gray")
txt <- bquote("mean"~theta==1)
text(1,probsMat[1,1],txt,pos=2,cex=1.5)
legend("topright",c("Pairwise","H","L"),lwd=2,
       lty=c("dotted","solid","dashed"),bty="n",cex=1.5)
The last figure compares the CDF of the BLUE estimator for \(\theta = 1\) and \(\theta = 10\) to Watterson's estimator and the pairwise estimator. The code is given below without further comments, as it re-uses the code snippets above with minor modifications.
n <- 10
lambda <- 5
ph_bcp <- block_counting_process(n)

set.seed(0)
ph_mv_sim_obj <- t(rMPH(R,ph_bcp))

set.seed(19)
ph_counts <- matrix(0,dim(ph_mv_sim_obj)[2],dim(ph_mv_sim_obj)[1])
for(i in 1:R) {
  ph_counts[i,] <- rpois(n-1,lambda*ph_mv_sim_obj[,i])
}

subintensity_matrix <- ph_bcp$subint_mat
rew_mat <- ph_bcp$reward_mat

bc <- coef_matrix[4,] # coef_matrix was generated above; its rows correspond to the entries of thetaVec, the fourth entry is theta = 10
res <- 1000
bc <- res*bc

# compute characteristic function
themean <- lambda*sum(mean(ph_bcp)*bc)  # the mean of the linear combination
bT <- subintensity_matrix               # bold T
bA <- rew_mat                           # bold A
bS <- bT - lambda*diag(rowSums(bA))     # bold S
be <- matrix(1,dim(bT)[1],1)            # bold one = (1,1,...,1)^T
beone <- matrix(0,1,dim(bT)[1]); beone[1] <- 1 # (1,0,...,0)

phi <- function(t) (exp(-1i*themean*t))*beone%*%solve(bS+lambda*diag(c(bA%*%(exp(1i*t)^bc))) )%*%bT%*%be

# Invert numerically
appvals <- ApproxCDF(phi,H = 1e5,eta=0.0001,xlim=c(-10*res,10*res),smoothe=TRUE)

xvals <- appvals[[1]]
yvals <- appvals[[2]]

bc2 <- (1/res)*bc
xvals2 <- (1/res)*xvals
themean2 <- (1/res)*themean

# Compute point probabilities of Watterson's theta for theta = 10
ph <- PH(ph_bcp$subint_mat)
watter <- reward_phase_type(ph, rowSums(ph_bcp$reward_mat))
lambda <- 5

bM <- solve( diag(dim(watter$subint_mat)[1])-(1/lambda)*watter$subint_mat)
bm <- rowSums(diag(dim(bM)[1]) - bM)

a1 <- 1/sum(1/1:(n-1)) # "normalizing" constant in Watterson's theta
len <- 250             # number of points to include

out <- rep(0,len)
for(i in 0:(len-1)) {
  out[i+1] <- ph$init_probs%*%(bM%^%i)%*%bm
}

wxt <- -10+a1*(0:(len-1)) # Possible values for Watterson's theta

# Pairwise estimator for theta = 10
ba <- ( 1:(n-1) )*( (n-1):1 ) # Coefficients of pairwise estimator
DPH_obj <- DPHrep(bM,ph_bcp$reward_mat,ba)
bMt <- DPH_obj[[1]]
sizes_of_blocks <- DPH_obj[[2]]

beone <- rep(0,dim(bMt)[1])
beone[sizes_of_blocks[1]] <- 1
be <- matrix(rep(1,dim(bMt)[1]))
bmt <- be - bMt%*%be

len <- 1e3

# Running product with bMt - bMt is pretty large, so computing its matrix powers in the naive way as above is time-consuming.
probs2 <- rep(0,len)
run_bMT <- beone
for(i in 1:len) {
  probs2[i] <- run_bMT%*%bmt
  run_bMT <- run_bMT%*%bMt
}

a1 <- n*(n-1)/2 # Normalizing constant of the pairwise estimator
wx <- -10+(1/a1)*(0:(len-1))
plot(xvals2,yvals,type="l",ylim=c(0,1),xlab="",ylab="Cumulative distribution function",cex.lab=1.5,las = 1,cex.axis=1.5)
lines(stepfun(wx,cumsum(c(0,probs2))),col="red",do.points=FALSE,lwd=2)
lines(xvals2,yvals,type="l",ylim=c(0,1),xlab="",ylab="",lwd=2)
lines(stepfun(wxt,cumsum(c(0,out))),col="blue",do.points=FALSE,lwd=2)
legend("bottomright",c("BLUE","Watterson","Pairwise"),
       col=c("black","blue","red"),bty="n",cex=1.6,lwd=2)
#'Numerical Approximation of characteristic function
#'
#'\code{ApproxCDF} approximates the cdf F when given a characteristic function phi of a centered random variable, using the formula found in Waller (1995) with
#'original reference to Bohman (1974). The procedure can be numerically unstable in the tails of the distribution, so
#'only the center of the approximation is returned. Some simplifying approximations explained in "Numerical inversion of Laplace transform and characteristic function"
#'are used. Note that phi should have a finite first moment.
#'
#'@param phi the characteristic function to be inverted
#'@param H A total of 2H+1 values of F are approximated. By default H of these values are returned unless an interval is provided.
#'@param eta A scaling parameter. By default equidistant points in the interval (-2*pi/eta, 2*pi/eta) are approximated.
#'@param xlim (optional) limits on the x-axis
#'@param smoothe (optional) Should smoothing be used? If TRUE default weights of the function \code{simple_smoothe} are used. If an object of length > 1 is provided,
#'this will be passed to \code{simple_smoothe}
#'
#'@examples
#'phi <- function(t) exp(-t^2/2)
#'appvals=ApproxCDF(phi,H=1000,eta=0.5,xlim=c(-3,3))
#'plot(appvals[[1]],appvals[[2]],type="l",lwd=2)
#'lines(appvals[[1]],pnorm(appvals[[1]]),type="l",col="red")
#'
#'phi <- function(t) sqrt(2)*abs(t)*besselK(sqrt(2)*abs(t),1)
#'appvals=ApproxCDF(phi,H=10000,eta=0.1,xlim=c(-3,3))
#'plot(appvals[[1]],appvals[[2]],type="l",lwd=2)
#'lines(appvals[[1]],pt(appvals[[1]],df=2),type="l",col="red")
#'
#'@export
ApproxCDF = function(phi,H=2000,eta=0.5,xlim=NULL,smoothe=FALSE) {
  z_vals = rep(0,H)
  co = 1
  for(n in 1:(H-1)) {
    z_vals[co+1] = phi(eta*n)/(pi*1i*(n)) # start at index 2 - the first value of z is 0
    co = co + 1
  }
  yvals_pos = 0.5+(1:H)/H-Re(fft(z_vals,inverse=FALSE))
  yvals_neg = 0.5-(1:H)/H-Re(fft(z_vals,inverse=TRUE))
  xvals_pos = 2*pi*(1:H)/(eta*H)
  xvals_neg = -xvals_pos
  xvals_neg = rev(xvals_neg)
  yvals_neg = rev(yvals_neg)
  xvals = c(xvals_neg,xvals_pos)
  yvals = c(yvals_neg,yvals_pos)

  if(!is.null(xlim)) {
    indexes = intersect(which(xvals>xlim[1]),which(xvals<xlim[2]))
  } else {
    indexes = (1:(H+1))+floor( (H-1)/2)
  }
  xvals = xvals[indexes]
  yvals = yvals[indexes]
  if(smoothe) {
    if(length(smoothe)>1) {
      yvals = simple_smoothe(yvals,smoothe)
    } else {
      yvals = simple_smoothe(yvals)
    }
  }
  return(list(xvals,yvals))
}
#' Simple smoothing
#'
#' \code{simple_smoothe} computes a simple moving weighted average of an input vector \code{x}. The weight vector must have an odd number of entries, and defaults to
#' \code{c(0.1,0.20,0.4,0.20,0.1)}
#'
#' @param x input to be smoothed
#' @param svec smoothing vector
#'
#'@examples
#'smoothed_yvals = simple_smoothe(yvals)
#'smoothed_yvals = simple_smoothe(yvals,c(0.2,0.6,0.2))
#'
#'@export
simple_smoothe <- function(x,svec= c(0.1,0.20,0.4,0.20,0.1)) {
  if ((length(svec) %% 2) == 0) {stop("Please provide an odd number of smoothing weights")}
  out = x
  offset = floor(length(svec)/2)
  for(i in (1+offset):(length(x)-offset)) {
    out[i] = sum(x[(i-offset):(i+offset)]*svec)
  }
  return(out)
}
#' Construction of DPH-representation
#'
#'
#' \code{DPHrep} computes the representation of an integer-weighted linear combination of the site frequency spectrum as a discrete phase-type distribution.
#' The construction is described in Section 7.1 of Hobolth (2021).
#'
#' @param bM Sub-transition probabilities of the underlying discrete Markov chain (cf. Figure 6).
#' @param bA State space of the underlying block counting process
#' @param ba Vector of integer coefficients
#'
#' @return List consisting of
#' bMt: The constructed matrix of sub-transition probabilities.
#' sizes_of_blocks: The sizes of the constructed blocks.
#'
#' @examples
#' ba = c(1,2,3)
#' theta = 1
#' ph_bcp = block_counting_process(4)
#' subintensity_matrix = ph_bcp$subint_mat
#' bA = ph_bcp$reward_mat
#' ph = PH(subintensity_matrix)
#' ph_rew_obj = reward_phase_type(ph, rowSums(bA))
#' bS = ph_rew_obj$subint_mat
#' bM = solve(diag(dim(bS)[1])-(2/theta)*bS)
#' DPHrep(bM, bA, ba)
#'
#'
#' @export
DPHrep <- function(bM,bA,ba) {
  m = nrow(bA) # this is p in the paper
  sizes_of_blocks = rep(0,m) # obtain the sizes of the blocks using formula XX
  for(i in 1:m) {
    sizes_of_blocks[i] = max(ba*(bA[i,] > 0))
  }
  bMt = matrix(0,sum(sizes_of_blocks),sum(sizes_of_blocks)) # bold-Mtilde
  for(i in 1:m) {
    for(j in 1:m) {
      if(i <= j) { # off-diagonal blocks
        bmvec = rep(0,sizes_of_blocks[j])
        # The bottom row is calculated using formula DD
        for(k in 1:sizes_of_blocks[j]) {
          bmvec[sizes_of_blocks[j]-k+1] = sum(bA[j,]*(ba == k))
        }
        bmvec = bM[i,j]*bmvec/sum(bmvec)
        cur_i = sum(sizes_of_blocks[1:i])
        if(j == 1) {
          cur_j = 1
        } else {
          cur_j = sum(sizes_of_blocks[1:(j-1)]) + 1
        }
        bMt[cur_i,cur_j:(cur_j+sizes_of_blocks[j]-1)] = bmvec
      }
      # The diagonal-blocks are treated as a separate case
      if((i == j) && sizes_of_blocks[i] > 1) {
        size_of_current_block = sizes_of_blocks[i]
        cur_i = sum(sizes_of_blocks[1:i]) - size_of_current_block + 1
        cur_j = sum(sizes_of_blocks[1:j]) - size_of_current_block + 2
        bMt[cur_i:(cur_i + size_of_current_block - 2),cur_j:(cur_j + size_of_current_block - 2)] = diag(size_of_current_block-1) # add identity-matrix of appropriate size
      }
    }
  }
  return(list(bMt,sizes_of_blocks))
}
#' Rate matrix and state space of the block counting process
#'
#'
#' \code{RateMAndStateSpace} finds the state space and corresponding rate matrix
#' for the block counting process for a number of samples n in the
#' standard coalescent.
#'
#' @param n Number of samples
#'
#' @return List consisting of
#' RateM: Rate matrix
#' StSpM: Matrix with rows corresponding to the states
#' A state is an n-dimensional row vector (a1,...,an).
#' For example the beginning state is (n,0,...,0),
#' the next state is (n-2,1,0,...,0),
#' and the ending state is (0,...,0,1)
#'
#' @examples
#' RateMAndStateSpace(8)
#'
#'
#'
#' @export
RateMAndStateSpace <- function(n){
  ##----------------------------------------------------
  ## State space
  ##----------------------------------------------------
  ## Size of the state space (number of states)
  nSt <- partitions::P(n)
  ## Definition of the state space
  StSpM <- matrix(ncol=n,nrow=nSt)
  ## Set of partitions of [n]
  x <- partitions::parts(n)
  ## Rewriting the partitions as (a1,...,an)
  for (i in 1:nSt) {
    st <- x[,i]
    StSpM[i,] <- tabulate(x[,i],nbins=n)
  }
  ## Reordering
  StSpM <- StSpM[order(rowSums(StSpM),decreasing=TRUE),]
  ## Because of this ordering we can't 'go back', i.e.
  ## below the diagonal the entries are always zero
  ##----------------------------------------------------
  ## Intensity matrix
  ##----------------------------------------------------
  RateM <- matrix(0,ncol=nSt,nrow=nSt)
  ## Following Algorithm 4.2
  for (i in 1:(nSt-1)){
    for (j in (i+1):nSt){
      # cat(i," state i",StSpM[i,])
      # cat(" ",j," state j",StSpM[j,])
      cvec <- StSpM[i,]-StSpM[j,]
      # cat(" cvec",cvec)
      ## Two branches are merged, i.e. removed from state i
      check1 <- sum(cvec[cvec>0])==2
      # cat(" check1",check1)
      ## One new branch is created, i.e. added in state j
      check2 <- sum(cvec[cvec<0])==-1
      # cat(" check2",check2)
      if (check1 & check2){
        ## Size(s) of the block(s) and the corresponding rates
        tmp <- StSpM[i,which(cvec>0)]
        RateM[i,j] <- ifelse(length(tmp)==1,tmp*(tmp-1)/2,prod(tmp))
      }
    }
  }
  ## Diagonal part of the rate matrix
  for (i in 1:nSt){
    RateM[i,i] <- -sum(RateM[i,])
  }
  return(list(RateM=RateM,StSpM=StSpM))
}
#' \code{block_counting_process} returns the block counting process for a given sample size
#' as a \code{mult_cont_phase_type} object.
#'
#' @param n Number of samples
#'
#' @return ph_rew_obj
#' A \code{mult_cont_phase_type} representation of the block counting process of size n
#'
#' @examples
#' block_counting_process(8)
#'
#' @export
block_counting_process <- function(n){

  RMASS = RateMAndStateSpace(n)
  m = dim(RMASS$RateM)[1] # (m should be equal to partitions::P(n))
  # Obtain subintensity matrix
  ph = PH(RMASS$RateM[1:(m-1),1:(m-1)])

  # The reward matrix is the state space matrix of the block counting process, except the row & column related to the absorbing state.
  rew_mat = RMASS$StSpM[1:(m-1),1:(n-1)]

  ph_rew_obj = MPH(ph$subint_mat, ph$init_probs, reward_mat = rew_mat)
  return(ph_rew_obj)
}