diff --git a/R/LCx.survFit.R b/R/LCx.survFit.R
index fea47b517e57e4baa26516fceadea48adc203bd9..e5811ec5dd4918a90fee67eb165c5242dd66f0ce 100644
--- a/R/LCx.survFit.R
+++ b/R/LCx.survFit.R
@@ -1,226 +1,209 @@
-#' Predict \eqn{x}\% Lethal Concentration at any specified time point for 
-#' a \code{survFit} object.
-#' 
-#' The function \code{LCx}, \eqn{x}\% Lethal Concentration (\eqn{LC_x}), is use to compute
-#'  the dose required to kill \eqn{x}\% of the members of a tested population
-#'  after a specified test duration (\code{time_LCx}) (default is the maximum
-#'  time point of the experiment).
-#'  
-#'  Mathematical definition of \eqn{x}\% Lethal Concentration at time \eqn{t},
-#'  denoted \eqn{LC(x,t)}, is:
-#'  
-#'  \eqn{S(LC(x,t), t) = S(0, t)*(1- x/100)},
-#'  
-#'  where \eqn{S(LC(x,t), t)} is the survival probability at concentration
-#'  \eqn{LC(x,t)} at time \eqn{t}, and \eqn{S(0,t)} is the survival probability at
-#'  no concentration (i.e. concentration is \eqn{0}) at time \eqn{t} which
-#'  reflect the background mortality \eqn{h_b}:
-#'  
-#'  \eqn{S(0, t) = exp(-hb* t)}.
-#'   
-#'  In the function \code{LCx}, we use the median of \eqn{S(0,t)} to rescale the
-#'  \eqn{x}\% Lethal Concentration at time \eqn{t}.
-#'  
-#' @rdname LCX
-#' 
-#' @param object An object of class \code{survFit}
-#' @param X Percentage of individuals dying (e.g., \eqn{50} for \eqn{LC_{50}}, \eqn{10} for \eqn{LC_{10}}, ...)
-#' @param time_LCx A number giving the time at which  \eqn{LC_{x}} has to be estimated. 
-#' If NULL, the latest time point of the experiment is used.
-#' @param conc_range A vector of length 2 with minimal and maximal value of the 
-#' range of concentration. If NULL, the range is
-#' define between 0 and the highest tested concentration of the experiment.
-#' @param npoints Number of time point in \code{conc_range} between 0 and the maximal concentration. 100 by default.
-#' @param \dots Further arguments to be passed to generic methods
-#'
-#' @return The function returns an object of class \code{LCx}, which is a list
-#'  with the following information:
-#' \item{X_prop}{Survival probability of individuals surviving considering the median
-#'  of the background mortality (i.e. \eqn{S(0, t)*(1- x/100)})}
-#' \item{X_prop_provided}{Survival probability of individuals surviving as provided in arguments (i.e. \eqn{(100-X)/100)}}
-#' \item{time_LCx}{A number giving the time at which  \eqn{LC_{x}} has to be
-#'  estimated as provided in arguments or if NULL, the latest time point of the
-#'   experiment is used.}
-#' \item{df_LCx}{A \code{data.frame} with quantiles (median, 2.5\% and 97.5\%)
-#'  of \eqn{LC_{X}} at time \code{time_LCx} for \eqn{X}\% of individuals}
-#' \item{df_dose}{A \code{data.frame} with four columns: \code{concentration}, and median \code{q50} and 95\% credible interval
-#'  (\code{qinf95} and \code{qsup95}) of the survival probability at time \code{time_LCx}}
-#' 
-#'    
-#' @examples 
-#' 
-#' # (1) Load the data
-#' data("propiconazole")
-#' 
-#' # (2) Create an object of class 'survData'
-#' dataset <- survData(propiconazole)
-#' 
-#' \donttest{
-#' # (3) Run the survFit function with model_type SD (or IT)
-#' out_SD <- survFit(dataset, model_type = "SD")
-#' 
-#' # (4) estimate LC50 at time 4
-#' LCx(out_SD, X = 50, time_LCx = 4)
-#' }
-#' 
-#' @import zoo
-#' @importFrom stats approx
-#' 
-#' @export
-#' 
-LCx.survFit <- function(object,
-                        X,
-                        time_LCx = NULL,
-                        conc_range = NULL,
-                        npoints = 100,
-                        ...){
-  
-  if(is.null(conc_range)){
-    conc_range = seq(0, max(object$jags.data$conc), length.out = npoints)
-  } else{
-    if(length(conc_range) != 2){
-      stop('conc_range must a vector of length 2 with minimal and maximal value of the range of concentration')
-    }
-    conc_range = seq(conc_range[1], conc_range[2], length.out = npoints)
-  }
-  
-  if(min(conc_range) != 0){
-    stop("Minimal value of 'conc_range' must be 0.")
-  }
-  
-  if(is.null(time_LCx)){
-    time_LCx = max(object$jags.data$time)
-  }
-  
-  df_dose <- doseResponse_survFitCstExp(x = object, time_LCx = time_LCx, conc_range, npoints)
-  
-  median_backgroundMortality_Conc0 = dplyr::filter(df_dose, concentration == 0)$q50
-  
-  X_prop_provided <- (100-X)/100
-  
-  X_prop <- (100-X)/100*median_backgroundMortality_Conc0
-  
-  df_LCx <- pointsLCx(df_dose, X_prop)
-  
-  object_LCx <- list(X_prop = X_prop,
-                     X_prop_provided = X_prop_provided,
-                     time_LCx = time_LCx,
-                     df_LCx = df_LCx,
-                     df_dose = df_dose)
-  class(object_LCx) <- c("LCx", "list")
-  
-  return(object_LCx)
-}
-
-# dose response curve
-# 
-doseResponse_survFitCstExp <- function(x, time_LCx,
-                                       conc_range, npoints){
-  
-  model_type = x$model_type
-  
-  # parameters
-  mctot <- do.call("rbind", x$mcmc)
-  kd <- 10^mctot[, "kd_log10"]
-  # "hb" is not in survFit object of morse <v3.2.0
-  if("hb" %in% colnames(mctot)){
-    hb <- mctot[, "hb"]  
-  } else{ hb <- 10^mctot[, "hb_log10"] }
-  
-  # all theorical
-  k <- 1:length(conc_range)
-  j <- 1:npoints
-  
-  if(model_type == "SD"){
-    
-    if(is.null(time_LCx)){
-      time_LCx = max(x$jags.data$time)
-    }
-    
-    z <- 10^mctot[, "z_log10"]
-    kk <- 10^mctot[, "kk_log10"]
-    
-    dtheo <- lapply(k, function(kit) { # conc
-        Surv_SD(Cw = conc_range[kit],
-                time = time_LCx,
-                kk = kk,
-                kd = kd,
-                z = z,
-                hb = hb)
-    })
-  }
-  if(model_type == "IT"){
-    alpha <- 10^mctot[, "alpha_log10"]
-    beta <- 10^mctot[, "beta_log10"]
-    
-    if(is.null(time_LCx)){
-      time_LCx = max(x$jags.data$time)
-    }
-    
-    dtheo <- lapply(k, function(kit) { # concentration pour chaque concentration
-      Surv_IT_LCx(Cw = conc_range[kit],
-              time = time_LCx,
-              kd = kd,
-              hb = hb,
-              alpha = alpha,
-              beta = beta)
-    })
-  }
-  
-  # transpose dtheo
-  dtheo <- do.call("rbind", lapply(dtheo, t))
-  
-  # quantile
-  qinf95 <- apply(dtheo, 1, quantile, probs = 0.025, na.rm = TRUE)
-  qsup95 <- apply(dtheo, 1, quantile, probs = 0.975, na.rm = TRUE)
-  q50 <- apply(dtheo, 1, quantile, probs = 0.5, na.rm = TRUE)
-  
-  df_dose_Resp = data.frame(concentration = conc_range,
-                            q50 = q50,
-                            qinf95 = qinf95,
-                            qsup95 = qsup95)
-}
-
-Surv_IT_LCx <- function(Cw, time, kd, hb, alpha, beta)
-{
-  D <- Cw*(1-exp(-kd * time))
-  S <- exp(-hb * time) * ( 1- 1/(1 + (D/alpha)^(- beta))) 
-  return(S)
-}
-
-# points for LCx
-# 
-pointsLCx <- function(df_dose, X_prop){
-  
-  if(min(df_dose$q50) < X_prop & X_prop < max(df_dose$q50)){
-    LCX_q50 = approx( df_dose$q50, df_dose$concentration, xout = X_prop, ties = mean)$y
-  } else {
-    LCX_q50 = NA
-    warning(paste("No median for survival probability of", X_prop,
-                  " in the range of concentrations under consideration: [",
-                  min(df_dose$concentration), ";", max(df_dose$concentration), "]"))
-  }
-  if(min(df_dose$qinf95) < X_prop & X_prop < max(df_dose$qinf95)){
-    LCX_qinf95 = approx( df_dose$qinf95, df_dose$concentration, xout = X_prop, ties = mean)$y
-  } else{
-    LCX_qinf95 = NA
-    warning(paste("No 95%inf for survival probability of", X_prop ,
-                  " in the range of concentrations under consideration: [",
-                  min(df_dose$concentration), ";", max(df_dose$concentration), "]"))
-  }
-  if(min(df_dose$qsup95) < X_prop & X_prop < max(df_dose$qsup95)){
-    LCX_qsup95 = approx( df_dose$qsup95, df_dose$concentration, xout = X_prop, ties = mean)$y
-  } else{
-    LCX_qsup95 = NA
-    warning(paste("No 95%sup for survival probability of", X_prop,
-                  " in the range of concentrations under consideration: [",
-                  min(df_dose$concentration), ";", max(df_dose$concentration), "]"))
-  }
-  
-  df_LCx <- data.frame(quantile = c("median", "quantile 2.5%", "quantile 97.5%"),
-                       LCx = as.numeric(c(LCX_q50, LCX_qinf95, LCX_qsup95)))
-    # as.numeric is needed here because if all values are NA, LCx has type logical
-  return(df_LCx)
-}
-
-
+#' Predict \eqn{x}\% Lethal Concentration at any specified time point for 
+#' a \code{survFit} object.
+#' 
+#' The function \code{LCx}, \eqn{x}\% Lethal Concentration (\eqn{LC_x}), is used to compute
+#'  the dose required to kill \eqn{x}\% of the members of a tested population
+#'  after a specified test duration (\code{time_LCx}) (default is the maximum
+#'  time point of the experiment).
+#'  
+#'  Mathematical definition of \eqn{x}\% Lethal Concentration at time \eqn{t},
+#'  denoted \eqn{LC(x,t)}, is:
+#'  
+#'  \eqn{S(LC(x,t), t) = S(0, t)*(1 - x/100)},
+#'  
+#'  where \eqn{S(LC(x,t), t)} is the survival probability at concentration
+#'  \eqn{LC(x,t)} at time \eqn{t}, and \eqn{S(0,t)} is the survival probability at
+#'  no concentration (i.e. concentration is \eqn{0}) at time \eqn{t} which
+#'  reflects the background mortality \eqn{h_b}:
+#'  
+#'  \eqn{S(0, t) = exp(-h_b * t)}.
+#'   
+#'  In the function \code{LCx}, we use the median of \eqn{S(0,t)} to rescale the
+#'  \eqn{x}\% Lethal Concentration at time \eqn{t}.
+#'  
+#' @rdname LCX
+#' 
+#' @param object An object of class \code{survFit}
+#' @param X Percentage of individuals dying (e.g., \eqn{50} for \eqn{LC_{50}}, \eqn{10} for \eqn{LC_{10}}, ...)
+#' @param time_LCx A number giving the time at which  \eqn{LC_{x}} has to be estimated. 
+#' If NULL, the latest time point of the experiment is used.
+#' @param conc_range A vector of length 2 with the minimal and maximal values of the 
+#' range of concentration. If NULL, the range is
+#' defined between 0 and the highest tested concentration of the experiment.
+#' @param npoints Number of concentration points in \code{conc_range} between 0 and the maximal concentration. The default is 100.
+#' @param \dots Further arguments to be passed to generic methods
+#'
+#' @return The function returns an object of class \code{LCx}, which is a list
+#'  with the following information:
+#' \item{X_prop}{Target survival probability rescaled by the median
+#'  of the background mortality (i.e. \eqn{S(0, t)*(1 - x/100)})}
+#' \item{X_prop_provided}{Target survival probability as provided in arguments (i.e. \eqn{(100-X)/100})}
+#' \item{time_LCx}{A number giving the time at which  \eqn{LC_{x}} has to be
+#'  estimated as provided in arguments or if NULL, the latest time point of the
+#'   experiment is used.}
+#' \item{df_LCx}{A \code{data.frame} with quantiles (median, 2.5\% and 97.5\%)
+#'  of \eqn{LC_{X}} at time \code{time_LCx} for \eqn{X}\% of individuals}
+#' \item{df_dose}{A \code{data.frame} with four columns: \code{concentration}, and median \code{q50} and 95\% credible interval
+#'  (\code{qinf95} and \code{qsup95}) of the survival probability at time \code{time_LCx}}
+#' 
+#' @import zoo
+#' @importFrom stats approx
+#' 
+#' @export
+#' 
+LCx.survFit <- function(object,
+                        X,
+                        time_LCx = NULL,
+                        conc_range = NULL,
+                        npoints = 100,
+                        ...){
+  
+  if(is.null(conc_range)){
+    conc_range = seq(0, max(object$jags.data$conc), length.out = npoints)
+  } else{
+    if(length(conc_range) != 2){
+      stop("'conc_range' must be a vector of length 2 with the minimal and maximal values of the range of concentration.")
+    }
+    conc_range = seq(conc_range[1], conc_range[2], length.out = npoints)
+  }
+  
+  if(min(conc_range) != 0){
+    stop("Minimal value of 'conc_range' must be 0.")
+  }
+  
+  if(is.null(time_LCx)){
+    time_LCx = max(object$jags.data$time)
+  }
+  
+  df_dose <- doseResponse_survFitCstExp(x = object, time_LCx = time_LCx, conc_range, npoints)
+  
+  median_backgroundMortality_Conc0 = dplyr::filter(df_dose, concentration == 0)$q50
+  
+  X_prop_provided <- (100-X)/100
+  
+  X_prop <- (100-X)/100*median_backgroundMortality_Conc0
+  
+  df_LCx <- pointsLCx(df_dose, X_prop)
+  
+  object_LCx <- list(X_prop = X_prop,
+                     X_prop_provided = X_prop_provided,
+                     time_LCx = time_LCx,
+                     df_LCx = df_LCx,
+                     df_dose = df_dose)
+  class(object_LCx) <- c("LCx", "list")
+  
+  return(object_LCx)
+}
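+
+# A minimal usage sketch, never run at load time, assuming the 'propiconazole'
+# dataset shipped with the package (mirroring the former roxygen example):
+if(FALSE){
+  data("propiconazole")
+  dataset <- survData(propiconazole)
+  # fit the SD model, then estimate the LC50 at time 4
+  out_SD <- survFit(dataset, model_type = "SD")
+  LCx(out_SD, X = 50, time_LCx = 4)
+}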
+
+# dose response curve
+# 
+doseResponse_survFitCstExp <- function(x, time_LCx,
+                                       conc_range, npoints){
+  
+  model_type = x$model_type
+  
+  # parameters
+  mctot <- do.call("rbind", x$mcmc)
+  kd <- 10^mctot[, "kd_log10"]
+  # "hb" is not in survFit object of morse <v3.2.0
+  if("hb" %in% colnames(mctot)){
+    hb <- mctot[, "hb"]  
+  } else{ hb <- 10^mctot[, "hb_log10"] }
+  
+  # index over all theoretical concentrations
+  k <- 1:length(conc_range)
+  
+  if(model_type == "SD"){
+    
+    if(is.null(time_LCx)){
+      time_LCx = max(x$jags.data$time)
+    }
+    
+    z <- 10^mctot[, "z_log10"]
+    kk <- 10^mctot[, "kk_log10"]
+    
+    dtheo <- lapply(k, function(kit) { # conc
+        Surv_SD(Cw = conc_range[kit],
+                time = time_LCx,
+                kk = kk,
+                kd = kd,
+                z = z,
+                hb = hb)
+    })
+  }
+  if(model_type == "IT"){
+    alpha <- 10^mctot[, "alpha_log10"]
+    beta <- 10^mctot[, "beta_log10"]
+    
+    if(is.null(time_LCx)){
+      time_LCx = max(x$jags.data$time)
+    }
+    
+    dtheo <- lapply(k, function(kit) { # one survival vector per concentration
+      Surv_IT_LCx(Cw = conc_range[kit],
+              time = time_LCx,
+              kd = kd,
+              hb = hb,
+              alpha = alpha,
+              beta = beta)
+    })
+  }
+  
+  # transpose dtheo
+  dtheo <- do.call("rbind", lapply(dtheo, t))
+  
+  # quantile
+  qinf95 <- apply(dtheo, 1, quantile, probs = 0.025, na.rm = TRUE)
+  qsup95 <- apply(dtheo, 1, quantile, probs = 0.975, na.rm = TRUE)
+  q50 <- apply(dtheo, 1, quantile, probs = 0.5, na.rm = TRUE)
+  
+  df_dose_Resp <- data.frame(concentration = conc_range,
+                             q50 = q50,
+                             qinf95 = qinf95,
+                             qsup95 = qsup95)
+  return(df_dose_Resp)
+}
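+
+# A tiny self-contained sketch of the quantile summary above: rows of 'dtheo'
+# are concentrations, columns are MCMC draws, and apply() collapses each row
+# to its median and 95% credible bounds. The values are arbitrary.
+if(FALSE){
+  dtheo <- matrix(runif(5 * 100), nrow = 5) # 5 concentrations x 100 draws
+  apply(dtheo, 1, quantile, probs = c(0.025, 0.5, 0.975), na.rm = TRUE)
+}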
+
+Surv_IT_LCx <- function(Cw, time, kd, hb, alpha, beta)
+{
+  # scaled damage at constant exposure, then IT survival: a log-logistic
+  # threshold distribution combined with background mortality
+  D <- Cw * (1 - exp(-kd * time))
+  S <- exp(-hb * time) * (1 - 1/(1 + (D/alpha)^(-beta)))
+  return(S)
+}
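+
+# A quick numeric check of Surv_IT_LCx (a sketch; the values are arbitrary):
+# with hb = 0 and kd large enough that the scaled damage is ~Cw, choosing
+# Cw = alpha gives S = 1 - 1/(1 + 1) = 0.5, since alpha is the median of the
+# log-logistic threshold distribution.
+if(FALSE){
+  Surv_IT_LCx(Cw = 2, time = 10, kd = 100, hb = 0, alpha = 2, beta = 5) # 0.5
+}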
+
+# points for LCx
+# 
+pointsLCx <- function(df_dose, X_prop){
+  
+  if(min(df_dose$q50) < X_prop & X_prop < max(df_dose$q50)){
+    LCX_q50 = approx( df_dose$q50, df_dose$concentration, xout = X_prop, ties = mean)$y
+  } else {
+    LCX_q50 = NA
+    warning(paste("No median for survival probability of", X_prop,
+                  " in the range of concentrations under consideration: [",
+                  min(df_dose$concentration), ";", max(df_dose$concentration), "]"))
+  }
+  if(min(df_dose$qinf95) < X_prop & X_prop < max(df_dose$qinf95)){
+    LCX_qinf95 = approx( df_dose$qinf95, df_dose$concentration, xout = X_prop, ties = mean)$y
+  } else{
+    LCX_qinf95 = NA
+    warning(paste("No 95%inf for survival probability of", X_prop ,
+                  " in the range of concentrations under consideration: [",
+                  min(df_dose$concentration), ";", max(df_dose$concentration), "]"))
+  }
+  if(min(df_dose$qsup95) < X_prop & X_prop < max(df_dose$qsup95)){
+    LCX_qsup95 = approx( df_dose$qsup95, df_dose$concentration, xout = X_prop, ties = mean)$y
+  } else{
+    LCX_qsup95 = NA
+    warning(paste("No 95%sup for survival probability of", X_prop,
+                  " in the range of concentrations under consideration: [",
+                  min(df_dose$concentration), ";", max(df_dose$concentration), "]"))
+  }
+  
+  df_LCx <- data.frame(quantile = c("median", "quantile 2.5%", "quantile 97.5%"),
+                       LCx = as.numeric(c(LCX_q50, LCX_qinf95, LCX_qsup95)))
+    # as.numeric is needed here because if all values are NA, LCx has type logical
+  return(df_LCx)
+}
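+
+# A minimal sketch of the inversion performed by pointsLCx, on synthetic,
+# monotone dose-response data: approx() swaps the roles of x and y to read
+# the concentration at which each survival quantile crosses the target.
+if(FALSE){
+  conc <- 0:10
+  df <- data.frame(concentration = conc,
+                   q50 = exp(-0.2 * conc),
+                   qinf95 = exp(-0.25 * conc),
+                   qsup95 = exp(-0.15 * conc))
+  pointsLCx(df, X_prop = 0.5) # concentrations where each curve crosses 0.5
+}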
+
+
diff --git a/R/MFx.survFit.R b/R/MFx.survFit.R
index 65ae3fbd7f68c92bb3423217a5106d321699fa31..69606be2817e545db86d2c737fa993acd94023d8 100644
--- a/R/MFx.survFit.R
+++ b/R/MFx.survFit.R
@@ -1,443 +1,419 @@
-#' Predict x\% Multiplication Factor at any specified time point for 
-#' a \code{survFit} object.
-#' 
-#' The function \code{MFx}, \eqn{x}\% Multiplication Factor at time \eqn{t}, (\eqn{MF(x,t)}),
-#' is used to compute the multiplication factor
-#' applied to the concentration exposure profile in order to
-#' reduce by \eqn{x}\% (argument \code{X}) the survival probability at a
-#'  specified test duration \eqn{t} (argument \code{time_MFx}) (default is the maximum
-#'  time point of the experiment).
-#'  
-#'  Mathematical definition of \eqn{x}\% Multiplication Factor at time \eqn{t}
-#'  (at the end of a time series \eqn{T = \{0, \dots, t\}}),
-#'  denoted \eqn{MF(x,t)}, is given by:
-#'  
-#'  \eqn{S(MF(x,t) * C_w(\tau \in T), t) = S( C_w(\tau \in T), t)*(1- x/100)},
-#'  
-#'  where \eqn{C_w(\tau \in T)} is the initial exposure profile without
-#'  multiplication factor. And so the expression \eqn{S(MF(x,t)* C_w(\tau \in T), t)}
-#'   is the survival probability after an exposure profile
-#'    \eqn{MF(x,t)* C_w(\tau \in T)} at time \eqn{t}.
-#'   
-#' @rdname MFx
-#' 
-#' @param object An object of class \code{survFit}.
-#' @param data_predict A dataframe with two columns \code{time} and \code{conc}.
-#' @param X Percentage of survival change (e.g., \eqn{50} for survival decrease of 50\%
-#'  , or \eqn{-50} for survival increase of 50\%).The default is 50. 
-#'  Only time series computed during the adaptation using a binary search in
-#'  \eqn{O(log(n))} are returned. However, if \code{NULL}, all time series
-#'  computed from the vector \code{MFx_range} are returned.
-#' @param time_MFx A number giving the time at which  \eqn{MF(x,t)} has to be estimated. 
-#' If NULL, the latest time point of the profile is used.
-#' @param MFx_range A vector from which lower and upper bound of the range of the
-#'  multiplication factor \code{MFx} are generated. The default is a vector \code{c(0, 1000)}.
-#' If argument \code{X} is \code{NULL}, then all the time series generated with
-#' \code{MFx_range} are returned.
-#' @param mcmc_size Can be used to reduce the number of MCMC samples in order to speed up
-#'  the computation. The default is 1000.
-#' @param hb_value If \code{TRUE}, the background mortality \code{hb} is taken into
-#'  account from the posterior.
-#' If \code{FALSE}, parameter \code{hb} is set to 0. The default is \code{TRUE}.
-#' @param spaghetti If \code{TRUE}, return a set of survival curves using
-#' parameters drawn from the posterior distribution.
-#' @param accuracy Accuracy of the multiplication factor. The default is 0.01.
-#' @param quiet If \code{FALSE}, print the evolution of accuracy.
-#' @param threshold_iter Threshold number of iteration.
-#' @param hb_valueFORCED If \code{hb_value} is \code{FALSE}, it fix \code{hb}.
-#' @param ode IF \code{ode} is \code{TRUE}, algo use predict_ode rather than predict. Default is \code{TRUE}.
-#' @param interpolate_length Length of the time sequence for which output is wanted.
-#' @param interpolate_method The interpolation method for concentration. See package \code{deSolve} for details.
-#' Default is \code{linear}.
-#' @param \dots Further arguments to be passed to generic methods
-#'
-#' @return The function returns an object of class \code{MFx}, which is a list
-#'  with the following information:
-#'  \item{X_prop}{Survival probability for \code{X} percent of reduction of the initial median 
-#' survival probability at time \code{time_MFx}.}
-#' \item{X_prop_provided}{A number giving the proportion of reduction in survival.}
-#' \item{time_MFx}{A number giving the time at which  \eqn{MF(x,t)} has to be
-#'  estimated as provided in arguments or if NULL, the latest time point of the
-#'   profile is used.}
-#' \item{df_MFx}{A \code{data.frame} with quantiles (median, 2.5\% and 97.5\%)
-#'  of \eqn{MF(x,t)} at time \eqn{t}, \code{time_MFx}, for \eqn{x}\% of survival reduction.}
-#' \item{df_dose}{A \code{data.frame} with quantiles (median, 2.5\% and 97.5\%)
-#'  of survival probability along the computed multiplication factor and at time \code{time_MFx}.}
-#' \item{MFx_tested}{A vector of all multiplication factors computed.} 
-#' \item{ls_predict}{A list of all object of class \code{survFitPredict} obtained
-#' from computing survival probability for every profiles build from the vector of
-#' multiplication factors \code{MFx_tested}.}
-#' 
-#'    
-#' @examples 
-#' 
-#' # (1) Load the data
-#' data("propiconazole")
-#' 
-#' # (2) Create an object of class 'survData'
-#' dataset <- survData(propiconazole)
-#' 
-#' \donttest{
-#' # (3) Run the survFit function with model_type SD (or IT)
-#' out_SD <- survFit(dataset, model_type = "SD")
-#' 
-#' # (4) data to predict
-#' data_4prediction <- data.frame(time = 1:10, conc = c(0,0.5,3,3,0,0,0.5,3,1.5,0))
-#' 
-#' # (5) estimate MF(x=30, t=4), that is for 30% reduction of survival at time 4
-#' MFx_SD_30.4 <- MFx(out_SD, data_predict = data_4prediction , X = 30, time_MFx = 4)
-#' 
-#' # (5bis) estimate MF(x,t) along the MF_range from 5 to 10 (50) (X = NULL)
-#' MFx_SD_range <- MFx(out_SD, data_predict = data_4prediction ,
-#'                     X = NULL, time_MFx = 4, MFx_range = seq(5, 10, length.out = 50))
-#' }
-#' 
-#' 
-#' @export
-#' 
-MFx.survFit <- function(object,
-                        data_predict,
-                        X = 50,
-                        time_MFx = NULL,
-                        MFx_range = c(0,1000),
-                        mcmc_size = 1000,
-                        hb_value = TRUE,
-                        spaghetti = FALSE,
-                        accuracy = 0.01,
-                        quiet = FALSE,
-                        threshold_iter = 100,
-                        hb_valueFORCED = 0,
-                        ode=TRUE,
-                        interpolate_length = NULL,
-                        interpolate_method = "linear",
-                        ...){
-  
-  ## Analyse data_predict data.frame
-  if(!all(colnames(data_predict) %in% c("conc", "time")) || ncol(data_predict) != 2){
-    stop("The argument 'data_predict' is a dataframe with two columns 'time' and 'conc'.")
-  }
-  
-  ## Check time_MFx
-  if(is.null(time_MFx))  time_MFx = max(data_predict$time)
-
-  if(!(time_MFx %in% data_predict$time)){
-    stop("Please provide a 'time_MFx' corresponding to a time-point at which concentration is provided.
-            Interpolation of concentration is too specific to be automatized.")
-  }
-  
-  ls_data_predict <- list()
-  ls_predict <- list()
-
-  ls_data_predict[[1]] <- data_predict
-  ls_data_predict[[1]]$replicate <- rep("predict_MFx_1", nrow(data_predict))
-  
-  if(ode == TRUE){
-    ls_predict[[1]] <- predict_ode( object = object,
-                                data_predict = ls_data_predict[[1]],
-                                spaghetti = spaghetti,
-                                mcmc_size = mcmc_size,
-                                hb_value = hb_value,
-                                hb_valueFORCED = hb_valueFORCED,
-                                interpolate_length = interpolate_length,
-                                interpolate_method = interpolate_method)
-  } else{
-    ls_predict[[1]] <- predict( object = object,
-                                data_predict = ls_data_predict[[1]],
-                                spaghetti = spaghetti,
-                                mcmc_size = mcmc_size,
-                                hb_value = hb_value,
-                                hb_valueFORCED = hb_valueFORCED )
-  }
-  
-
-
-  filter_time_MFx = dplyr::filter(ls_predict[[1]]$df_quantile, time == time_MFx)
-
-  median_Mortality_test <- filter_time_MFx$q50
-  theoretical_X <- (100 - X) / 100 * filter_time_MFx$q50 # Necessary to compared with accuracy
-
-  if(!is.null(X)){
-    binarySearch_MFx_q50 <- binarySearch_MFx(object = object,
-                                             spaghetti = spaghetti,
-                                             mcmc_size = mcmc_size,
-                                             hb_value = hb_value,
-                                             MFx_range = MFx_range,
-                                             time_MFx = time_MFx,
-                                             theoretical_X = theoretical_X,
-                                             value_mortality_test = median_Mortality_test,
-                                             accuracy = accuracy,
-                                             data_predict = data_predict,
-                                             ls_data_predict = ls_data_predict,
-                                             ls_predict = ls_predict,
-                                             quiet = quiet,
-                                             quantile = "q50",
-                                             threshold_iter = threshold_iter,
-                                             hb_valueFORCED = hb_valueFORCED,
-                                             ode = ode) # "q50", "qinf95", "qsup95"
-    binarySearch_MFx_qinf95 <- binarySearch_MFx(object = object,
-                                                spaghetti = spaghetti,
-                                                mcmc_size = mcmc_size,
-                                                hb_value = hb_value,
-                                                MFx_range = MFx_range,
-                                                time_MFx = time_MFx,
-                                               theoretical_X = theoretical_X,
-                                               value_mortality_test = filter_time_MFx$qinf95,
-                                               accuracy = accuracy,
-                                               data_predict = data_predict,
-                                               ls_data_predict = ls_data_predict,
-                                               ls_predict = ls_predict,
-                                               quiet = quiet,
-                                               quantile = "qinf95",
-                                               threshold_iter = threshold_iter,
-                                               hb_valueFORCED = hb_valueFORCED,
-                                               ode = ode) # "q50", "qinf95", "qsup95"
-    binarySearch_MFx_qsup95 <- binarySearch_MFx(object = object,
-                                                spaghetti = spaghetti,
-                                                mcmc_size = mcmc_size,
-                                                hb_value = hb_value,
-                                                MFx_range = MFx_range,
-                                                time_MFx = time_MFx,
-                                               theoretical_X = theoretical_X,
-                                               value_mortality_test = filter_time_MFx$qsup95,
-                                               accuracy = accuracy,
-                                               data_predict = data_predict,
-                                               ls_data_predict = ls_data_predict,
-                                               ls_predict = ls_predict,
-                                               quiet = quiet,
-                                               quantile = "qsup95",
-                                               threshold_iter = threshold_iter,
-                                               hb_valueFORCED = hb_valueFORCED,
-                                               ode = ode) # "q50", "qinf95", "qsup95"
-    
-    #
-    # Make a dataframe with quantile of all generated time series
-    #
-    
-    ls_predict_quantile_q50 <- lapply(binarySearch_MFx_q50$k, function(kit){
-      df_quantile <- binarySearch_MFx_q50$ls_predict[[kit]]$df_quantile
-      df_quantile$MFx <- rep(binarySearch_MFx_q50$MFx[[kit]], nrow(binarySearch_MFx_q50$ls_predict[[kit]]$df_quantile))
-      return(df_quantile)
-    })
-    ls_predict_quantile_qinf95 <- lapply(binarySearch_MFx_qinf95$k, function(kit){
-      df_quantile <- binarySearch_MFx_qinf95$ls_predict[[kit]]$df_quantile
-      df_quantile$MFx <- rep(binarySearch_MFx_qinf95$MFx[[kit]], nrow(binarySearch_MFx_qinf95$ls_predict[[kit]]$df_quantile))
-      return(df_quantile)
-    })
-    ls_predict_quantile_qsup95 <- lapply(binarySearch_MFx_qsup95$k, function(kit){
-      df_quantile <- binarySearch_MFx_qsup95$ls_predict[[kit]]$df_quantile
-      df_quantile$MFx <- rep(binarySearch_MFx_qsup95$MFx[[kit]], nrow(binarySearch_MFx_qsup95$ls_predict[[kit]]$df_quantile))
-      return(df_quantile)
-    })
-    
-    predict_MFx_quantile_q50 <- do.call("rbind", ls_predict_quantile_q50)
-    predict_MFx_quantile_qinf95 <- do.call("rbind", ls_predict_quantile_qinf95)
-    predict_MFx_quantile_qsup95 <- do.call("rbind", ls_predict_quantile_qsup95)
-    #
-    # doseResponse dataframe at specific time_MFx
-    #
-    df_dose_q50 <- dplyr::filter(predict_MFx_quantile_q50, time == time_MFx)
-    df_dose_q50$id = rep("q50", nrow(df_dose_q50))
-    df_dose_qinf95 <- dplyr::filter(predict_MFx_quantile_qinf95, time == time_MFx)
-    df_dose_qinf95$id = rep("qinf95", nrow(df_dose_qinf95))
-    df_dose_qsup95 <- dplyr::filter(predict_MFx_quantile_qsup95, time == time_MFx)
-    df_dose_qsup95$id = rep("qsup95", nrow(df_dose_qsup95))
-    
-    ## Additional element to return
-    df_dose <- do.call("rbind", list(df_dose_q50, df_dose_qinf95, df_dose_qsup95))
-    MFx <- binarySearch_MFx_q50$MFx
-    ls_predict <- binarySearch_MFx_q50$ls_predict
-    
-  }
-  if(is.null(X)){
-    theoretical_X = NULL # to return in the final object
-    
-    MFx <- MFx_range
-    
-    k <- 1:length(MFx_range)
-
-    ls_data_predict <- lapply(k, function(kit){
-      profil_test <- data_predict
-      profil_test$conc <- MFx[kit] * data_predict$conc
-      profil_test$replicate <- rep(paste0("predict_MFx_", MFx[kit]), nrow(data_predict))
-      return(profil_test)
-    })
-    
-    ls_predict <- lapply(k, function(kit){
-      if(ode == TRUE){
-        predict_ode(object = object,
-                data_predict = ls_data_predict[[kit]],
-                spaghetti = spaghetti,
-                mcmc_size = mcmc_size,
-                hb_value = hb_value,
-                hb_valueFORCED = hb_valueFORCED)
-      } else{
-        predict(object = object,
-                data_predict = ls_data_predict[[kit]],
-                spaghetti = spaghetti,
-                mcmc_size = mcmc_size,
-                hb_value = hb_value,
-                hb_valueFORCED = hb_valueFORCED)
-      }
-
-    })
-    
-    #
-    # Make a dataframe with quantile of all generated time series
-    #
-    
-    ls_predict_quantile <- lapply(k, function(kit){
-      df_quantile <- ls_predict[[kit]]$df_quantile
-      df_quantile$MFx <- rep(MFx[[kit]], nrow(ls_predict[[kit]]$df_quantile))
-      return(df_quantile)
-    })
-    predict_MFx_quantile <- do.call("rbind", ls_predict_quantile)
-    
-    df_dose <- dplyr::filter(predict_MFx_quantile, time == time_MFx)
-    
-  }
-  
-  #
-  # Compute table with the optimal MFx obtained if X != NULL
-  #
-  if(!is.null(X)){
-    
-    MFx_q50 = df_dose_q50$MFx[nrow(df_dose_q50)]
-    MFx_qinf95 = df_dose_qinf95$MFx[nrow(df_dose_qinf95)]
-    MFx_qsup95 = df_dose_qsup95$MFx[nrow(df_dose_qsup95)]
-    # Compute MFx q95:
-    # pts_MFx <- pointsMFx(df_dose, median_Mortality_test)
-    # MFx_qinf95 <- pts_MFx$MFx_qinf95
-    # MFx_qsup95 <- pts_MFx$MFx_qsup95
-    # 
-    # Return dataframe of quantiles MFx
-    
-    df_MFx <- data.frame(quantile = c("median", "quantile 2.5%", "quantile 97.5%"),
-                         MFx = c(MFx_q50, MFx_qinf95, MFx_qsup95))
-  } else{
-    df_MFx <- data.frame(quantile = c("median", "quantile 2.5%", "quantile 97.5%"),
-                         MFx = c(NA, NA, NA))
-  }
-  
-# warning("This is not an error message:
-# Just take into account that MFx as been estimated with a binary
-# search using the 'accuracy' argument. To improve the shape of the curve, you
-# can use X = NULL, and computed time series around the median MFx, with the
-#           vector `MFx_range`.")
-  
-  ls_out = list(X_prop = theoretical_X,
-                X_prop_provided = X/100,
-                time_MFx = time_MFx,
-                df_MFx = df_MFx,
-                df_dose = df_dose, # return MFx at specific time
-                MFx_tested = MFx,
-                ls_predict = ls_predict)
-  
-  class(ls_out) = c("list", "MFx")
-  
-  return(ls_out)
-}
-
-
-
-
-##########################
-#
-#
-#
-
-#
-# binary search of MFx in O(log n)
-#
-
-binarySearch_MFx <- function(object,
-                             spaghetti,
-                             mcmc_size,
-                             hb_value,
-                             MFx_range,
-                             time_MFx,
-                             theoretical_X,
-                             value_mortality_test,
-                             accuracy,
-                             data_predict,
-                             ls_data_predict,
-                             ls_predict,
-                             quiet,
-                             quantile, # "q50", "qinf95", "qsup95"
-                             threshold_iter,
-                             hb_valueFORCED,
-                             ode
-                             ){
-    #
-    # binary search of MFx in O(log n)
-    #
-    i = 1
-    MFx = 1
-    MFx_min = min(MFx_range)
-    MFx_max = max(MFx_range)
-    MFx_test = max(MFx_range)
-    
-    while(abs(theoretical_X - value_mortality_test) > accuracy){
-      
-      MFx = c(MFx, MFx_test)
-      
-      ls_data_predict[[i+1]] <- data_predict
-      ls_data_predict[[i+1]]$conc <- MFx_test * data_predict$conc
-      ls_data_predict[[i+1]]$replicate <- rep(paste0("predict_MFx_", MFx_test), nrow(data_predict))
-      
-      if(ode == TRUE){
-        ls_predict[[i+1]] <- predict_ode(object = object,
-                                     data_predict = ls_data_predict[[i+1]],
-                                     spaghetti = spaghetti,
-                                     mcmc_size = mcmc_size,
-                                     hb_value = hb_value,
-                                     hb_valueFORCED = hb_valueFORCED)
-      } else{
-        ls_predict[[i+1]] <- predict(object = object,
-                                     data_predict = ls_data_predict[[i+1]],
-                                     spaghetti = spaghetti,
-                                     mcmc_size = mcmc_size,
-                                     hb_value = hb_value,
-                                     hb_valueFORCED = hb_valueFORCED)
-      }
-      
-      
-      filter_time_MFx = dplyr::filter(ls_predict[[i+1]]$df_quantile, time == time_MFx)
-      if(quantile == "q50"){ value_mortality_test = filter_time_MFx$q50 }
-      if(quantile == "qinf95"){ value_mortality_test = filter_time_MFx$qinf95 }
-      if(quantile == "qsup95"){ value_mortality_test = filter_time_MFx$qsup95 }
-      
-      if(quiet == FALSE){
-        cat(quantile, i,"accuracy:", abs(theoretical_X - value_mortality_test), " with multiplication factor:",  MFx_test, "\n")
-      }
-      
-      i = i + 1
-      if(theoretical_X - value_mortality_test < 0){
-        MFx_min = MFx_test
-        MFx_test = MFx_test + (MFx_max - MFx_min)/2
-      }
-      if(theoretical_X - value_mortality_test > 0){
-        MFx_max = MFx_test
-        MFx_test = MFx_test - (MFx_max - MFx_min)/2
-      }
-      if(MFx_test == max(MFx_range)){
-        MFx_test <- NULL
-        warning(paste("For", quantile, ", the multiplication factor is over the bound of", max(MFx_range)))
-        break
-      }
-      if(i > threshold_iter){
-        MFx_test <- NULL
-        warning(paste("For", quantile, ", the number of iterations reached the threshold number of iterations of", threshold_iter))
-        break
-      }
-    }
-    k <- 1:length(MFx)
-    
-    return(list(k = k,
-                MFx = MFx,
-                ls_predict = ls_predict,
-                ls_data_predict = ls_data_predict))
-  }
-  
-  
-  
-  
+#' Predict \eqn{x}\% Multiplication Factor at any specified time point for 
+#' a \code{survFit} object.
+#' 
+#' The function \code{MFx}, \eqn{x}\% Multiplication Factor at time \eqn{t} (\eqn{MF(x,t)}),
+#' is used to compute the multiplication factor
+#' applied to the concentration exposure profile in order to
+#' reduce by \eqn{x}\% (argument \code{X}) the survival probability at a
+#'  specified test duration \eqn{t} (argument \code{time_MFx}) (default is the maximum
+#'  time point of the experiment).
+#'  
+#'  Mathematical definition of \eqn{x}\% Multiplication Factor at time \eqn{t}
+#'  (at the end of a time series \eqn{T = \{0, \dots, t\}}),
+#'  denoted \eqn{MF(x,t)}, is given by:
+#'  
+#'  \eqn{S(MF(x,t) * C_w(\tau \in T), t) = S(C_w(\tau \in T), t)*(1 - x/100)},
+#'  
+#'  where \eqn{C_w(\tau \in T)} is the initial exposure profile without
+#'  multiplication factor, so that \eqn{S(MF(x,t)* C_w(\tau \in T), t)}
+#'   is the survival probability after exposure to the profile
+#'    \eqn{MF(x,t)* C_w(\tau \in T)} at time \eqn{t}.
+#'   
+#' @rdname MFx
+#' 
+#' @param object An object of class \code{survFit}.
+#' @param data_predict A dataframe with two columns \code{time} and \code{conc}.
+#' @param X Percentage of survival change (e.g., \eqn{50} for a survival decrease
+#'  of 50\%, or \eqn{-50} for a survival increase of 50\%). The default is 50.
+#'  Only time series computed during the adaptation using a binary search in
+#'  \eqn{O(log(n))} are returned. However, if \code{NULL}, all time series
+#'  computed from the vector \code{MFx_range} are returned.
+#' @param time_MFx A number giving the time at which  \eqn{MF(x,t)} has to be estimated. 
+#' If NULL, the latest time point of the profile is used.
+#' @param MFx_range A vector giving the lower and upper bounds of the range of the
+#'  multiplication factor \code{MFx}. The default is \code{c(0, 1000)}.
+#' If argument \code{X} is \code{NULL}, then all the time series generated with
+#' \code{MFx_range} are returned.
+#' @param mcmc_size Can be used to reduce the number of MCMC samples in order to speed up
+#'  the computation. The default is 1000.
+#' @param hb_value If \code{TRUE}, the background mortality \code{hb} is taken into
+#'  account from the posterior.
+#' If \code{FALSE}, parameter \code{hb} is set to 0. The default is \code{TRUE}.
+#' @param spaghetti If \code{TRUE}, return a set of survival curves using
+#' parameters drawn from the posterior distribution.
+#' @param accuracy Accuracy of the multiplication factor. The default is 0.01.
+#' @param quiet If \code{FALSE}, print the evolution of accuracy.
+#' @param threshold_iter Maximum number of iterations of the binary search. The default is 100.
+#' @param hb_valueFORCED If \code{hb_value} is \code{FALSE}, the value to which \code{hb} is fixed. The default is 0.
+#' @param ode If \code{TRUE}, the algorithm uses \code{predict_ode} rather than \code{predict}. The default is \code{TRUE}.
+#' @param interpolate_length Length of the time sequence for which output is wanted.
+#' @param interpolate_method The interpolation method for concentration. See package \code{deSolve} for details.
+#' Default is \code{linear}.
+#' @param \dots Further arguments to be passed to generic methods
+#'
+#' @return The function returns an object of class \code{MFx}, which is a list
+#'  with the following information:
+#'  \item{X_prop}{Survival probability after an \code{X} percent reduction of the initial median 
+#' survival probability at time \code{time_MFx}.}
+#' \item{X_prop_provided}{A number giving the proportion of reduction in survival.}
+#' \item{time_MFx}{A number giving the time at which  \eqn{MF(x,t)} has to be
+#'  estimated as provided in arguments or if NULL, the latest time point of the
+#'   profile is used.}
+#' \item{df_MFx}{A \code{data.frame} with quantiles (median, 2.5\% and 97.5\%)
+#'  of \eqn{MF(x,t)} at time \eqn{t}, \code{time_MFx}, for \eqn{x}\% of survival reduction.}
+#' \item{df_dose}{A \code{data.frame} with quantiles (median, 2.5\% and 97.5\%)
+#'  of survival probability along the computed multiplication factor and at time \code{time_MFx}.}
+#' \item{MFx_tested}{A vector of all multiplication factors computed.} 
+#' \item{ls_predict}{A list of all objects of class \code{survFitPredict} obtained
+#' by computing the survival probability for every profile built from the vector of
+#' multiplication factors \code{MFx_tested}.}
+#' 
+#' 
+#' @export
+#' 
+MFx.survFit <- function(object,
+                        data_predict,
+                        X = 50,
+                        time_MFx = NULL,
+                        MFx_range = c(0,1000),
+                        mcmc_size = 1000,
+                        hb_value = TRUE,
+                        spaghetti = FALSE,
+                        accuracy = 0.01,
+                        quiet = FALSE,
+                        threshold_iter = 100,
+                        hb_valueFORCED = 0,
+                        ode = TRUE,
+                        interpolate_length = NULL,
+                        interpolate_method = "linear",
+                        ...){
+  
+  ## Analyse data_predict data.frame
+  if(!all(colnames(data_predict) %in% c("conc", "time")) || ncol(data_predict) != 2){
+    stop("The argument 'data_predict' is a dataframe with two columns 'time' and 'conc'.")
+  }
+  
+  ## Check time_MFx
+  if(is.null(time_MFx))  time_MFx = max(data_predict$time)
+
+  if(!(time_MFx %in% data_predict$time)){
+    stop("Please provide a 'time_MFx' corresponding to a time-point at which concentration is provided.
+            Interpolation of concentration is too specific to be automatized.")
+  }
+  
+  ls_data_predict <- list()
+  ls_predict <- list()
+
+  ls_data_predict[[1]] <- data_predict
+  ls_data_predict[[1]]$replicate <- rep("predict_MFx_1", nrow(data_predict))
+  
+  if(ode == TRUE){
+    ls_predict[[1]] <- predict_ode( object = object,
+                                data_predict = ls_data_predict[[1]],
+                                spaghetti = spaghetti,
+                                mcmc_size = mcmc_size,
+                                hb_value = hb_value,
+                                hb_valueFORCED = hb_valueFORCED,
+                                interpolate_length = interpolate_length,
+                                interpolate_method = interpolate_method)
+  } else{
+    ls_predict[[1]] <- predict( object = object,
+                                data_predict = ls_data_predict[[1]],
+                                spaghetti = spaghetti,
+                                mcmc_size = mcmc_size,
+                                hb_value = hb_value,
+                                hb_valueFORCED = hb_valueFORCED )
+  }
+  
+
+
+  filter_time_MFx = dplyr::filter(ls_predict[[1]]$df_quantile, time == time_MFx)
+
+  median_Mortality_test <- filter_time_MFx$q50
+  theoretical_X <- (100 - X) / 100 * filter_time_MFx$q50 # target survival, compared against 'accuracy' in the binary search
+
+  if(!is.null(X)){
+    binarySearch_MFx_q50 <- binarySearch_MFx(object = object,
+                                             spaghetti = spaghetti,
+                                             mcmc_size = mcmc_size,
+                                             hb_value = hb_value,
+                                             MFx_range = MFx_range,
+                                             time_MFx = time_MFx,
+                                             theoretical_X = theoretical_X,
+                                             value_mortality_test = median_Mortality_test,
+                                             accuracy = accuracy,
+                                             data_predict = data_predict,
+                                             ls_data_predict = ls_data_predict,
+                                             ls_predict = ls_predict,
+                                             quiet = quiet,
+                                             quantile = "q50",
+                                             threshold_iter = threshold_iter,
+                                             hb_valueFORCED = hb_valueFORCED,
+                                             ode = ode) # "q50", "qinf95", "qsup95"
+    binarySearch_MFx_qinf95 <- binarySearch_MFx(object = object,
+                                                spaghetti = spaghetti,
+                                                mcmc_size = mcmc_size,
+                                                hb_value = hb_value,
+                                                MFx_range = MFx_range,
+                                                time_MFx = time_MFx,
+                                               theoretical_X = theoretical_X,
+                                               value_mortality_test = filter_time_MFx$qinf95,
+                                               accuracy = accuracy,
+                                               data_predict = data_predict,
+                                               ls_data_predict = ls_data_predict,
+                                               ls_predict = ls_predict,
+                                               quiet = quiet,
+                                               quantile = "qinf95",
+                                               threshold_iter = threshold_iter,
+                                               hb_valueFORCED = hb_valueFORCED,
+                                               ode = ode) # "q50", "qinf95", "qsup95"
+    binarySearch_MFx_qsup95 <- binarySearch_MFx(object = object,
+                                                spaghetti = spaghetti,
+                                                mcmc_size = mcmc_size,
+                                                hb_value = hb_value,
+                                                MFx_range = MFx_range,
+                                                time_MFx = time_MFx,
+                                               theoretical_X = theoretical_X,
+                                               value_mortality_test = filter_time_MFx$qsup95,
+                                               accuracy = accuracy,
+                                               data_predict = data_predict,
+                                               ls_data_predict = ls_data_predict,
+                                               ls_predict = ls_predict,
+                                               quiet = quiet,
+                                               quantile = "qsup95",
+                                               threshold_iter = threshold_iter,
+                                               hb_valueFORCED = hb_valueFORCED,
+                                               ode = ode) # "q50", "qinf95", "qsup95"
+    
+    #
+    # Make a dataframe with quantile of all generated time series
+    #
+    
+    ls_predict_quantile_q50 <- lapply(binarySearch_MFx_q50$k, function(kit){
+      df_quantile <- binarySearch_MFx_q50$ls_predict[[kit]]$df_quantile
+      df_quantile$MFx <- rep(binarySearch_MFx_q50$MFx[[kit]], nrow(binarySearch_MFx_q50$ls_predict[[kit]]$df_quantile))
+      return(df_quantile)
+    })
+    ls_predict_quantile_qinf95 <- lapply(binarySearch_MFx_qinf95$k, function(kit){
+      df_quantile <- binarySearch_MFx_qinf95$ls_predict[[kit]]$df_quantile
+      df_quantile$MFx <- rep(binarySearch_MFx_qinf95$MFx[[kit]], nrow(binarySearch_MFx_qinf95$ls_predict[[kit]]$df_quantile))
+      return(df_quantile)
+    })
+    ls_predict_quantile_qsup95 <- lapply(binarySearch_MFx_qsup95$k, function(kit){
+      df_quantile <- binarySearch_MFx_qsup95$ls_predict[[kit]]$df_quantile
+      df_quantile$MFx <- rep(binarySearch_MFx_qsup95$MFx[[kit]], nrow(binarySearch_MFx_qsup95$ls_predict[[kit]]$df_quantile))
+      return(df_quantile)
+    })
+    
+    predict_MFx_quantile_q50 <- do.call("rbind", ls_predict_quantile_q50)
+    predict_MFx_quantile_qinf95 <- do.call("rbind", ls_predict_quantile_qinf95)
+    predict_MFx_quantile_qsup95 <- do.call("rbind", ls_predict_quantile_qsup95)
+    #
+    # doseResponse dataframe at specific time_MFx
+    #
+    df_dose_q50 <- dplyr::filter(predict_MFx_quantile_q50, time == time_MFx)
+    df_dose_q50$id = rep("q50", nrow(df_dose_q50))
+    df_dose_qinf95 <- dplyr::filter(predict_MFx_quantile_qinf95, time == time_MFx)
+    df_dose_qinf95$id = rep("qinf95", nrow(df_dose_qinf95))
+    df_dose_qsup95 <- dplyr::filter(predict_MFx_quantile_qsup95, time == time_MFx)
+    df_dose_qsup95$id = rep("qsup95", nrow(df_dose_qsup95))
+    
+    ## Additional element to return
+    df_dose <- do.call("rbind", list(df_dose_q50, df_dose_qinf95, df_dose_qsup95))
+    MFx <- binarySearch_MFx_q50$MFx
+    ls_predict <- binarySearch_MFx_q50$ls_predict
+    
+  }
+  if(is.null(X)){
+    theoretical_X = NULL # to return in the final object
+    
+    MFx <- MFx_range
+    
+    k <- 1:length(MFx_range)
+
+    ls_data_predict <- lapply(k, function(kit){
+      profil_test <- data_predict
+      profil_test$conc <- MFx[kit] * data_predict$conc
+      profil_test$replicate <- rep(paste0("predict_MFx_", MFx[kit]), nrow(data_predict))
+      return(profil_test)
+    })
+    
+    ls_predict <- lapply(k, function(kit){
+      if(ode == TRUE){
+        predict_ode(object = object,
+                data_predict = ls_data_predict[[kit]],
+                spaghetti = spaghetti,
+                mcmc_size = mcmc_size,
+                hb_value = hb_value,
+                hb_valueFORCED = hb_valueFORCED)
+      } else{
+        predict(object = object,
+                data_predict = ls_data_predict[[kit]],
+                spaghetti = spaghetti,
+                mcmc_size = mcmc_size,
+                hb_value = hb_value,
+                hb_valueFORCED = hb_valueFORCED)
+      }
+
+    })
+    
+    #
+    # Make a dataframe with quantile of all generated time series
+    #
+    
+    ls_predict_quantile <- lapply(k, function(kit){
+      df_quantile <- ls_predict[[kit]]$df_quantile
+      df_quantile$MFx <- rep(MFx[[kit]], nrow(ls_predict[[kit]]$df_quantile))
+      return(df_quantile)
+    })
+    predict_MFx_quantile <- do.call("rbind", ls_predict_quantile)
+    
+    df_dose <- dplyr::filter(predict_MFx_quantile, time == time_MFx)
+    
+  }
+  
+  #
+  # Compute table with the optimal MFx obtained if X != NULL
+  #
+  if(!is.null(X)){
+    
+    MFx_q50 = df_dose_q50$MFx[nrow(df_dose_q50)]
+    MFx_qinf95 = df_dose_qinf95$MFx[nrow(df_dose_qinf95)]
+    MFx_qsup95 = df_dose_qsup95$MFx[nrow(df_dose_qsup95)]
+    # Compute MFx q95:
+    # pts_MFx <- pointsMFx(df_dose, median_Mortality_test)
+    # MFx_qinf95 <- pts_MFx$MFx_qinf95
+    # MFx_qsup95 <- pts_MFx$MFx_qsup95
+    # 
+    # Return dataframe of quantiles MFx
+    
+    df_MFx <- data.frame(quantile = c("median", "quantile 2.5%", "quantile 97.5%"),
+                         MFx = c(MFx_q50, MFx_qinf95, MFx_qsup95))
+  } else{
+    df_MFx <- data.frame(quantile = c("median", "quantile 2.5%", "quantile 97.5%"),
+                         MFx = c(NA, NA, NA))
+  }
+  
+# warning("This is not an error message:
+# Just take into account that MFx as been estimated with a binary
+# search using the 'accuracy' argument. To improve the shape of the curve, you
+# can use X = NULL, and computed time series around the median MFx, with the
+#           vector `MFx_range`.")
+  
+  ls_out = list(X_prop = theoretical_X,
+                X_prop_provided = X/100,
+                time_MFx = time_MFx,
+                df_MFx = df_MFx,
+                df_dose = df_dose, # return MFx at specific time
+                MFx_tested = MFx,
+                ls_predict = ls_predict)
+  
+  class(ls_out) = c("list", "MFx")
+  
+  return(ls_out)
+}
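+
+# A minimal usage sketch, never run at load time, assuming the 'propiconazole'
+# dataset shipped with the package (mirroring the former roxygen example):
+# the multiplication factor leading to a 30% survival reduction at time 4
+# under a pulsed exposure profile.
+if(FALSE){
+  data("propiconazole")
+  dataset <- survData(propiconazole)
+  out_SD <- survFit(dataset, model_type = "SD")
+  data_4prediction <- data.frame(time = 1:10,
+                                 conc = c(0, 0.5, 3, 3, 0, 0, 0.5, 3, 1.5, 0))
+  MFx_SD_30.4 <- MFx(out_SD, data_predict = data_4prediction, X = 30, time_MFx = 4)
+  # or explore a whole range of multiplication factors (X = NULL)
+  MFx_SD_range <- MFx(out_SD, data_predict = data_4prediction,
+                      X = NULL, time_MFx = 4, MFx_range = seq(5, 10, length.out = 50))
+}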
+
+
+
+
+##########################
+#
+#
+#
+
+#
+# binary search of MFx in O(log n)
+#
+
+binarySearch_MFx <- function(object,
+                             spaghetti,
+                             mcmc_size,
+                             hb_value,
+                             MFx_range,
+                             time_MFx,
+                             theoretical_X,
+                             value_mortality_test,
+                             accuracy,
+                             data_predict,
+                             ls_data_predict,
+                             ls_predict,
+                             quiet,
+                             quantile, # "q50", "qinf95", "qsup95"
+                             threshold_iter,
+                             hb_valueFORCED,
+                             ode
+                             ){
+    i = 1
+    MFx = 1
+    MFx_min = min(MFx_range)
+    MFx_max = max(MFx_range)
+    MFx_test = max(MFx_range)
+    
+    while(abs(theoretical_X - value_mortality_test) > accuracy){
+      
+      MFx = c(MFx, MFx_test)
+      
+      ls_data_predict[[i+1]] <- data_predict
+      ls_data_predict[[i+1]]$conc <- MFx_test * data_predict$conc
+      ls_data_predict[[i+1]]$replicate <- rep(paste0("predict_MFx_", MFx_test), nrow(data_predict))
+      
+      if(ode == TRUE){
+        ls_predict[[i+1]] <- predict_ode(object = object,
+                                     data_predict = ls_data_predict[[i+1]],
+                                     spaghetti = spaghetti,
+                                     mcmc_size = mcmc_size,
+                                     hb_value = hb_value,
+                                     hb_valueFORCED = hb_valueFORCED)
+      } else{
+        ls_predict[[i+1]] <- predict(object = object,
+                                     data_predict = ls_data_predict[[i+1]],
+                                     spaghetti = spaghetti,
+                                     mcmc_size = mcmc_size,
+                                     hb_value = hb_value,
+                                     hb_valueFORCED = hb_valueFORCED)
+      }
+      
+      
+      filter_time_MFx = dplyr::filter(ls_predict[[i+1]]$df_quantile, time == time_MFx)
+      if(quantile == "q50"){ value_mortality_test = filter_time_MFx$q50 }
+      if(quantile == "qinf95"){ value_mortality_test = filter_time_MFx$qinf95 }
+      if(quantile == "qsup95"){ value_mortality_test = filter_time_MFx$qsup95 }
+      
+      if(quiet == FALSE){
+        cat(quantile, i,"accuracy:", abs(theoretical_X - value_mortality_test), " with multiplication factor:",  MFx_test, "\n")
+      }
+      
+      i = i + 1
+      if(theoretical_X - value_mortality_test < 0){
+        MFx_min = MFx_test
+        MFx_test = MFx_test + (MFx_max - MFx_min)/2
+      }
+      if(theoretical_X - value_mortality_test > 0){
+        MFx_max = MFx_test
+        MFx_test = MFx_test - (MFx_max - MFx_min)/2
+      }
+      if(MFx_test == max(MFx_range)){
+        MFx_test <- NULL
+        warning(paste("For", quantile, ", the multiplication factor is over the bound of", max(MFx_range)))
+        break
+      }
+      if(i > threshold_iter){
+        MFx_test <- NULL
+        warning(paste("For", quantile, ", the number of iterations reached the threshold number of iterations of", threshold_iter))
+        break
+      }
+    }
+    k <- 1:length(MFx)
+    
+    return(list(k = k,
+                MFx = MFx,
+                ls_predict = ls_predict,
+                ls_data_predict = ls_data_predict))
+  }
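+
+# A self-contained sketch of the bisection idea used above, on a cheap
+# deterministic function instead of MCMC predictions: halve the bracket
+# until the response is within 'accuracy' of the target.
+if(FALSE){
+  f <- function(mf) exp(-0.1 * mf) # stand-in for the survival quantile
+  target <- 0.5 ; accuracy <- 1e-3
+  lo <- 0 ; hi <- 1000 ; mf <- hi
+  while(abs(f(mf) - target) > accuracy){
+    if(f(mf) > target) lo <- mf else hi <- mf
+    mf <- (lo + hi)/2
+  }
+  mf # ~ log(2)/0.1 = 6.93
+}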
+  
+  
+  
+  
diff --git a/R/MFx_ode.survFit.R b/R/MFx_ode.survFit.R
index ba8c882f9c710bb3d3c941848a1eb4aad0c3f8ed..cc0b0126c15f973c5ec3a05938079f089ded4d54 100644
--- a/R/MFx_ode.survFit.R
+++ b/R/MFx_ode.survFit.R
@@ -80,14 +80,6 @@ MFx_ode <- function(object, ...){
 #' from computing survival probability for every profiles build from the vector of
 #' multiplication factors \code{MFx_tested}.}
 #' 
-#' @examples 
-#' 
-#' # (1) Load the data
-#' data("propiconazole")
-#' 
-#' # (2) Create an object of class 'survData'
-#' dataset <- survData(propiconazole)
-#' 
 #' 
 #' @export
 #' 
@@ -112,14 +104,14 @@ This can take a very long time to compute (minutes to hours).\n
 Prefer the function 'MFx' when possible.")
   
   ## Analyse data_predict data.frame
-  if (!all(colnames(data_predict) %in% c("conc", "time")) || ncol(data_predict) != 2) {
+  if(!all(colnames(data_predict) %in% c("conc", "time")) || ncol(data_predict) != 2){
     stop("The argument 'data_predict' is a dataframe with two columns 'time' and 'conc'.")
   }
   
   ## Check time_MFx
-  if (is.null(time_MFx))  time_MFx = max(data_predict$time)
+  if(is.null(time_MFx))  time_MFx = max(data_predict$time)
   
-  if (!(time_MFx %in% data_predict$time)) {
+  if(!(time_MFx %in% data_predict$time)){
     stop("Please provide a 'time_MFx' corresponding to a time-point at which concentration is provided.
          Interpolation of concentration is too specific to be automatized.")
   }
@@ -317,51 +309,6 @@ Prefer the function 'MFx' when possible.")
   }
 
 
-# points for LCx
-# 
-# 
-# pointsMFx <- function(df_dose, X_prop){
-#   
-#   if(min(df_dose$qinf95) < X_prop & X_prop < max(df_dose$qinf95)){
-#     df.qinf95 <- select(df_dose, c(MFx, qinf95))%>%
-#       dplyr::add_row(qinf95 = X_prop)%>%
-#       dplyr::arrange(qinf95)%>%
-#       dplyr::mutate(MFx = na.approx(MFx, qinf95, na.rm = FALSE))%>%
-#       dplyr::filter(qinf95 == X_prop)
-#     
-#     MFx_qinf95 <- df.qinf95$MFx
-#     
-#   } else {
-#     MFx_qinf95 <- NA
-#     
-#     warning(paste("No 95%inf for survival probability of", X_prop ,
-#                   " in the range of multiplication factors under consideration: [",
-#                   min(df_dose$MFx), ";", max(df_dose$MFx), "]"))
-#   }
-#   
-#   if(min(df_dose$qsup95) < X_prop & X_prop < max(df_dose$qsup95)){
-#     df.qsup95 <- select(df_dose, c(MFx,qsup95)) %>%
-#       add_row(qsup95 = X_prop) %>%
-#       arrange(qsup95) %>%
-#       mutate(MFx = na.approx(MFx,qsup95, na.rm = FALSE)) %>%
-#       filter(qsup95 == X_prop)
-#     
-#     MFx_qsup95 <- df.qsup95$MFx
-#     
-#   } else {
-#     
-#     MFx_qsup95 <- NA
-#     warning(paste("No 95%sup for survival probability of", X_prop,
-#                   " in the range of multiplication factors under consideration: [",
-#                   min(df_dose$MFx), ";", max(df_dose$MFx), "]"))
-#   }
-#   
-#   return(list(MFx_qinf95 = MFx_qinf95,
-#               MFx_qsup95 = MFx_qsup95))
-# }
-
-
-
 ##########################
 #
 #
diff --git a/R/plot.LCx.R b/R/plot.LCx.R
index 746db221484a7c3e2b20629db375d8dbb628a7cb..af9faea2c8a4c72cd6fe047870837068a228d0d6 100644
--- a/R/plot.LCx.R
+++ b/R/plot.LCx.R
@@ -15,30 +15,9 @@
 #' @keywords plot
 #' 
 #' @return a plot of class \code{ggplot}
-#' 
-#' @examples 
-#' 
-#' # (1) Load the data
-#' data("propiconazole")
-#' 
-#' # (2) Create an object of class 'survData'
-#' dataset <- survData(propiconazole)
-#' 
-#' \donttest{
-#' # (3) Run the survFit function with model_type SD (or IT)
-#' out_SD <- survFit(dataset, model_type = "SD")
-#' 
-#' # (4) estimate LC50 at time 4
-#' LCx_SD <- LCx(out_SD, X = 50, time_LCx = 4)
-#' 
-#' # (5) plot the object of class 'LCx'
-#' plot(LCx_SD)
-#' }
 #'
 #' @export
 #'
-#'
-#'
 plot.LCx <- function(x,
                      xlab = "Concentration",
                      ylab = "Survival probability \n median and 95 CI",
diff --git a/R/plot.MFx.R b/R/plot.MFx.R
index 120ff54449a88160d41d55318f345b74f44e3778..93f72062affa260c0d1dbb6ad0eaf53bd2de5b13 100644
--- a/R/plot.MFx.R
+++ b/R/plot.MFx.R
@@ -19,12 +19,9 @@
 #' @keywords plot
 #' 
 #' @return a plot of class \code{ggplot}
-#' 
 #'
 #' @export
 #'
-#'
-#'
 plot.MFx <- function(x,
                      x_variable = "MFx", # other option is "Time"
                      xlab = NULL,
diff --git a/R/plot.reproData.R b/R/plot.reproData.R
index e92e2fef5a82520e80454710c6f881c8dae94776..5dff09bddbfd798b5692d6923eb2aade9538ce0b 100644
--- a/R/plot.reproData.R
+++ b/R/plot.reproData.R
@@ -1,101 +1,88 @@
-#' Plotting method for \code{reproData} objects
-#'
-#' This is the generic \code{plot} S3 method for the \code{reproData} class.
-#' It plots the cumulated number of offspring as a function of time.
-#'
-#' @param x an object of class \code{reproData}
-#' @param xlab label of the \eqn{X}-axis
-#' @param ylab label of the \eqn{Y}-axis, by default \code{Cumulated Number of offspring}
-#' @param main main title for the plot
-#' @param concentration a numeric value corresponding to some concentration in
-#' \code{data}. If \code{concentration = NULL}, draws a plot for each concentration
-#' @param style graphical backend, can be \code{'ggplot'} or \code{'generic'}
-#' @param pool.replicate if \code{TRUE}, the datapoints of each replicate are
-#' summed for a same concentration
-#' @param addlegend if \code{TRUE}, adds a default legend to the plot
-#' @param remove.someLabels if \code{TRUE}, removes 3/4 of X-axis labels in
-#' \code{'ggplot'} style to avoid the label overlap
-#' @param \dots Further arguments to be passed to generic methods
-#'
-#' @note When \code{style = "generic"}, the function calls the generic function
-#' \code{\link[graphics]{plot}}
-#' @note When \code{style = "ggplot"}, the function return an object of class
-#'  \code{gg} and \code{ggplot}, see function \code{\link[ggplot2]{ggplot}}
-#'
-#' @keywords plot
-#' 
-#' @return a plot of class \code{ggplot}
-#'
-#' @examples
-#' # (1) Load the data
-#' data(cadmium1)
-#'
-#' # (2) Create an object of class 'reproData'
-#' cadmium1 <- reproData(cadmium1)
-#'
-#' # (3) Plot the reproduction data
-#' plot(cadmium1)
-#'
-#' # (4) Plot the reproduction data for a fixed concentration
-#' plot(cadmium1, concentration = 4.36, style = "generic")
-#'
-#' @import ggplot2
-#' @import grDevices
-#' @importFrom methods is
-#' @importFrom stats aggregate
-#'
-#' @export
-plot.reproData <- function(x,
-                           xlab,
-                           ylab = "Cumulated Number of offspring",
-                           main = NULL,
-                           concentration = NULL,
-                           style = "ggplot",
-                           pool.replicate = FALSE,
-                           addlegend = FALSE,
-                           remove.someLabels = FALSE, ...) {
-  if (!is(x, "reproData"))
-    stop("plot.reproData: object of class reproData expected")
-
-  if (style == "generic" && remove.someLabels)
-    warning("'remove.someLabels' argument is valid only in 'ggplot' style.",
-            call. = FALSE)
-
-  if (is.null(concentration) && addlegend)
-    warning("'addlegend' argument is valid only when 'concentration' is not null.",
-            call. = FALSE)
-
-  if (pool.replicate) {
-    # agregate by sum of replicate
-    x <- cbind(aggregate(cbind(Nreprocumul, Nsurv, Ninit) ~ time + conc, x, sum),
-               replicate = 1)
-  }
-
-  if (is.null(concentration)) {
-    reproDataPlotFull(x, xlab, ylab, style, remove.someLabels)
-  }
-  else {
-    reproDataPlotFixedConc(x, xlab, ylab, main, concentration, style, addlegend,
-                           remove.someLabels)
-  }
-}
-
-reproDataPlotFull <- function(data, xlab, ylab, style = "generic",
-                              remove.someLabels) {
-  dataPlotFull(data, xlab, ylab, "Nreprocumul", style,
-               remove.someLabels)
-}
-
-
-reproDataPlotFixedConc <- function(x,
-                                   xlab,
-                                   ylab,
-                                   main,
-                                   concentration,
-                                   style = "generic",
-                                   addlegend = FALSE,
-                                   remove.someLabels = FALSE) {
-  dataPlotFixedConc(x, xlab, ylab, main, "Nreprocumul",
-                    concentration, style, addlegend, remove.someLabels)
-}
-
+#' Plotting method for \code{reproData} objects
+#'
+#' This is the generic \code{plot} S3 method for the \code{reproData} class.
+#' It plots the cumulated number of offspring as a function of time.
+#'
+#' @param x an object of class \code{reproData}
+#' @param xlab label of the \eqn{X}-axis
+#' @param ylab label of the \eqn{Y}-axis, by default \code{Cumulated Number of offspring}
+#' @param main main title for the plot
+#' @param concentration a numeric value corresponding to some concentration in
+#' \code{data}. If \code{concentration = NULL}, draws a plot for each concentration
+#' @param style graphical backend, can be \code{'ggplot'} or \code{'generic'}
+#' @param pool.replicate if \code{TRUE}, the datapoints of each replicate are
+#' summed for each concentration
+#' @param addlegend if \code{TRUE}, adds a default legend to the plot
+#' @param remove.someLabels if \code{TRUE}, removes 3/4 of the \eqn{X}-axis labels in
+#' \code{'ggplot'} style to avoid label overlap
+#' @param \dots Further arguments to be passed to generic methods
+#'
+#' @note When \code{style = "generic"}, the function calls the generic function
+#' \code{\link[graphics]{plot}}
+#' @note When \code{style = "ggplot"}, the function return an object of class
+#'  \code{gg} and \code{ggplot}, see function \code{\link[ggplot2]{ggplot}}
+#'
+#' @keywords plot
+#' 
+#' @return a plot of class \code{ggplot}
+#'
+#' @import ggplot2
+#' @import grDevices
+#' @importFrom methods is
+#' @importFrom stats aggregate
+#'
+#' @export
+plot.reproData <- function(x,
+                           xlab,
+                           ylab = "Cumulated Number of offspring",
+                           main = NULL,
+                           concentration = NULL,
+                           style = "ggplot",
+                           pool.replicate = FALSE,
+                           addlegend = FALSE,
+                           remove.someLabels = FALSE, ...) {
+  if (!is(x, "reproData"))
+    stop("plot.reproData: object of class reproData expected")
+
+  if (style == "generic" && remove.someLabels)
+    warning("'remove.someLabels' argument is valid only in 'ggplot' style.",
+            call. = FALSE)
+
+  if (is.null(concentration) && addlegend)
+    warning("'addlegend' argument is valid only when 'concentration' is not null.",
+            call. = FALSE)
+
+  if (pool.replicate) {
+    # aggregate by summing over replicates
+    x <- cbind(aggregate(cbind(Nreprocumul, Nsurv, Ninit) ~ time + conc, x, sum),
+               replicate = 1)
+  }
+
+  if (is.null(concentration)) {
+    reproDataPlotFull(x, xlab, ylab, style, remove.someLabels)
+  }
+  else {
+    reproDataPlotFixedConc(x, xlab, ylab, main, concentration, style, addlegend,
+                           remove.someLabels)
+  }
+}
+
+reproDataPlotFull <- function(data, xlab, ylab, style = "generic",
+                              remove.someLabels) {
+  dataPlotFull(data, xlab, ylab, "Nreprocumul", style,
+               remove.someLabels)
+}
+
+
+reproDataPlotFixedConc <- function(x,
+                                   xlab,
+                                   ylab,
+                                   main,
+                                   concentration,
+                                   style = "generic",
+                                   addlegend = FALSE,
+                                   remove.someLabels = FALSE) {
+  dataPlotFixedConc(x, xlab, ylab, main, "Nreprocumul",
+                    concentration, style, addlegend, remove.someLabels)
+}
+
diff --git a/R/plot.reproFitTT.R b/R/plot.reproFitTT.R
index b1e1e7fef025724c6b40a09ac2565ce3087d20f5..6606e0d265a714b3b7bef7c7399d0d96fccc1886 100644
--- a/R/plot.reproFitTT.R
+++ b/R/plot.reproFitTT.R
@@ -1,424 +1,404 @@
-#' Plotting method for \code{reproFitTT} objects
-#' 
-#' This is the generic \code{plot} S3 method for the \code{reproFitTT} class.
-#' It plots the concentration-effect fit under target time reproduction
-#' analysis.
-#' 
-#' The fitted curve represents the \strong{estimated reproduction rate} at the target time
-#'  as a function of the chemical compound concentration.
-#' The function plots 95\% credible intervals for the estimated reproduction
-#' rate (by default the grey area around the fitted curve). Typically
-#' a good fit is expected to display a large overlap between the two types of intervals.
-#' If spaghetti = TRUE, the credible intervals are represented by two dotted
-#' lines limiting the credible band, and a spaghetti plot is added to this band.
-#' It consists of the representation of simulated curves using parameter values
-#' sampled in the posterior distribution (10\% of the MCMC chains are randomly
-#' taken for this sample).
-#'
-#' @param x an object of class \code{reproFitTT}
-#' @param xlab a label for the \eqn{X}-axis, by default \code{Concentration}
-#' @param ylab a label for the \eqn{Y}-axis, by default \code{Nb of offspring per ind/day}
-#' @param main main title for the plot
-#' @param fitcol color of the fitted curve
-#' @param fitlty line type of the fitted curve
-#' @param fitlwd width of the fitted curve
-#' @param spaghetti if \code{TRUE}, the credible interval is represented by 
-#' multiple curves
-#' @param cicol color of the 95 \% credible limits
-#' @param cilty line type of the 95 \% credible limits
-#' @param cilwd width of the 95 \% credible limits
-#' @param ribcol color of the ribbon between lower and upper credible limits.
-#' Transparent if \code{NULL}
-#' @param addlegend if \code{TRUE}, adds a default legend to the plot
-#' @param log.scale if \code{TRUE}, displays \eqn{X}-axis in log-scale
-#' @param style graphical backend, can be \code{'ggplot'} or \code{'generic'}
-#' @param \dots Further arguments to be passed to generic methods
-#' 
-#' @note When \code{style = "generic"}, the function calls the generic function
-#' \code{\link[graphics]{plot}}
-#' @note When \code{style = "ggplot"}, the function return an object of class
-#'  \code{ggplot}, see function \code{\link[ggplot2]{ggplot}} 
-#'
-#' 
-#' @import ggplot2
-#' @import grDevices
-#' @importFrom gridExtra grid.arrange arrangeGrob
-#' @importFrom grid grid.rect gpar
-#' @importFrom graphics plot axis legend lines par points polygon
-#' segments title
-#' @importFrom reshape2 melt
-#'
-#' @keywords plot
-#' 
-#' @return a plot of class \code{ggplot}
-#' 
-#' @examples
-#'
-#' # (1) Load the data
-#' data(cadmium1)
-#'
-#' # (2) Create an object of class "reproData"
-#' dataset <- reproData(cadmium1)
-#'
-#' \donttest{
-#' # (3) Run the reproFitTT function with the log-logistic gamma-Poisson model
-#' out <- reproFitTT(dataset, stoc.part = "gammapoisson",
-#'                   ecx = c(5, 10, 15, 20, 30, 50, 80), quiet = TRUE)
-#'
-#'
-#' # (4) Plot the fitted curve with generic style
-#' plot(out, xlab = expression("Concentration in" ~ mu~g.L^{-1}),
-#'      fitcol = "blue", cicol = "lightblue",
-#'      main = "Log-logistic response to concentration")
-#' }
-#' 
-#' @export
-plot.reproFitTT <- function(x,
-                            xlab = "Concentration",
-                            ylab = "Nb of offspring per ind/day",
-                            main = NULL,
-                            fitcol = "orange",
-                            fitlty = 1,
-                            fitlwd = 1,
-                            spaghetti = FALSE,
-                            cicol = "orange",
-                            cilty = 2,
-                            cilwd = 1,
-                            ribcol = "grey70",
-                            addlegend = FALSE,
-                            log.scale = FALSE,
-                            style = "ggplot", ...) {
-  # plot the fitted curve estimated by reproFitTT
-  # INPUTS
-  # - x:  reproFitTT object
-  # - xlab : label x
-  # - ylab : label y
-  # - main : main title
-  # - fitcol : color fitted curve
-  # - fitlty : type line fitted curve
-  # - fitlwd : width line fitted curve
-  # - cicol : color ci
-  # - cilty : type line ci
-  # - cilwd : width line ci
-  # - addlegend : boolean
-  # - log.scale : x log option
-  # - style : generic ou ggplot
-  # OUTPUT:
-  # - plot of fitted regression
-  
-  # Selection of datapoints that can be displayed given the type of scale
-  sel <- if (log.scale) x$dataTT$conc > 0 else TRUE
-  
-  dataTT <- x$dataTT[sel, ]
-  dataTT$resp <- dataTT$Nreprocumul / dataTT$Nindtime
-  transf_data_conc <- optLogTransform(log.scale, dataTT$conc)
-  
-  # Concentration values used for display in linear scale
-  display.conc <- (function() {
-    x <- optLogTransform(log.scale, dataTT$conc)
-    s <- seq(min(x),max(x), length = 100)
-    if(log.scale) exp(s) else s
-  })()
-  
-  # Possibly log transformed concentration values for display
-  curv_conc <- optLogTransform(log.scale, display.conc)
-  
-  cred.int <- reproMeanCredInt(x, display.conc)
-  
-  spaghetti.CI <- if (spaghetti) { reproSpaghetti(x, display.conc) } else NULL
-  dataCIm <- if (spaghetti) { melt(cbind(curv_conc, spaghetti.CI),
-                                   id.vars = c("curv_conc", "conc"))} else NULL
-  
-  curv_resp <- data.frame(conc = curv_conc, resp = cred.int[["q50"]],
-                          Line = "loglogistic")
-  
-  # ylim
-  ylim_CI <- if (spaghetti) { max(dataCIm$value, cred.int$qsup95)
-  } else {
-    max(cred.int$qsup95)
-  }
-
-  if (style == "generic") {
-    reproFitPlotGenericCredInt(x, dataTT$conc, transf_data_conc, dataTT$resp,
-                               curv_conc, curv_resp,
-                               cred.int, spaghetti.CI, dataCIm,
-                               xlab, ylab, fitcol, fitlty, fitlwd,
-                               main, addlegend,
-                               cicol, cilty, cilwd, ribcol, log.scale, ylim_CI)
-  }
-  else if (style == "ggplot") {
-    reproFitPlotGG(x, dataTT$conc, transf_data_conc, dataTT$resp,
-                   curv_conc, curv_resp,
-                   cred.int, spaghetti.CI, dataCIm,
-                   xlab, ylab, fitcol, fitlty, fitlwd,
-                   main, addlegend,
-                   cicol, cilty, cilwd, ribcol, log.scale, ylim_CI)
-  }
-  else stop("Unknown style")
-}
-
-#' @importFrom stats quantile rgamma
-reproMeanCredInt <- function(fit, x) {
-  # create the parameters for credible interval for the log logistic model
-  # INPUT:
-  # - fit : object of class reproFitTT
-  # - x : vector of concentrations values (x axis)
-  # OUTPUT:
-  # - ci : credible limit
-  
-  mctot <- do.call("rbind", fit$mcmc)
-  k <- nrow(mctot)
-  # parameters
-  d2 <- mctot[, "d"]
-  log10b2 <- mctot[, "log10b"]
-  b2 <- 10^log10b2
-  log10e2 <- mctot[, "log10e"]
-  e2 <- 10^log10e2
-  
-  # quantiles
-  qinf95 = NULL
-  q50 = NULL
-  qsup95 = NULL
-  
-  # poisson
-  if (fit$model.label == "P") {
-    for (i in 1:length(x)) {
-      theomean <- d2 / (1 + (x[i] / e2)^(b2)) # mean curve
-      # IC 95%
-      qinf95[i] <- quantile(theomean, probs = 0.025, na.rm = TRUE)
-      qsup95[i] <- quantile(theomean, probs = 0.975, na.rm = TRUE)
-      q50[i] <- quantile(theomean, probs = 0.5, na.rm = TRUE)
-    }
-  }
-  
-  # gamma poisson
-  else if (fit$model.label == "GP") {
-    # parameters
-    log10omega2 <- mctot[, "log10omega"]
-    omega2 <- 10^(log10omega2)
-    
-    for (i in 1:length(x)) {
-      theomean <- d2 / (1 + (x[i] / e2)^(b2)) # mean curve
-      theo <- rgamma(n = k, shape = theomean / omega2, rate = 1 / omega2)
-      # IC 95%
-      qinf95[i] <- quantile(theo, probs = 0.025, na.rm = TRUE)
-      qsup95[i] <- quantile(theo, probs = 0.975, na.rm = TRUE)
-      q50[i] <- quantile(theo, probs = 0.5, na.rm = TRUE)
-    }
-  }
-  # values for cred.int
-  ci <- data.frame(qinf95 = qinf95,
-                   q50 = q50,
-                   qsup95 = qsup95)
-  
-  return(ci)
-}
-
-reproSpaghetti <- function(fit, x) {
-  mctot <- do.call("rbind", fit$mcmc)
-  sel <- sample(nrow(mctot))[1:ceiling(nrow(mctot) / 10)]
-  k <- nrow(mctot[sel,])
-  # parameters
-  d2 <- mctot[, "d"][sel]
-  log10b2 <- mctot[, "log10b"][sel]
-  b2 <- 10^log10b2
-  log10e2 <- mctot[, "log10e"][sel]
-  e2 <- 10^log10e2
-  if (fit$model.label == "GP") {
-    log10omega2 <- mctot[, "log10omega"][sel]
-    omega2 <- 10^(log10omega2)
-  }
-  
-  # all theorical
-  dtheo <- array(data = NA, dim = c(length(x), length(e2)))
-  if (fit$model.label == "GP") dtheotemp <- dtheo
-  for (i in 1:length(e2)) {
-    if (fit$model.label == "P") {
-      dtheo[, i] <- d2[i] / (1 + (x / e2[i])^(b2[i])) # mean curve
-    }
-    else if (fit$model.label == "GP") {
-      dtheotemp[, i] <- d2[i] / (1 + (x / e2[i])^(b2[i])) # mean curve
-      dtheo[, i] <- rgamma(n = length(x), shape = dtheotemp[, i] / omega2[i],
-                           rate = 1 / omega2[i])
-    }
-  }
-  dtheof <- as.data.frame(cbind(x, dtheo))
-  names(dtheof) <- c("conc", paste0("X", 1:length(sel)))
-  
-  return(dtheof)
-}
-
-#' @importFrom epitools pois.exact
-reproFitPlotGenericCredInt <- function(x, data_conc, transf_data_conc, data_resp,
-                                       curv_conc, curv_resp,
-                                       cred.int, spaghetti.CI, dataCIm,
-                                       xlab, ylab, fitcol, fitlty, fitlwd,
-                                       main, addlegend,
-                                       cicol, cilty, cilwd, ribcol, log.scale, ylim_CI) {
-
-  # plot the fitted curve estimated by reproFitTT
-  # with generic style with credible interval
-  
-  plot(transf_data_conc, data_resp,
-       xlab = xlab,
-       ylab = ylab,
-       main = main,
-       xaxt = "n",
-       yaxt = "n",
-       ylim = c(0, ylim_CI + 0.01),
-       type = "n")
-  
-  # axis
-  axis(side = 2, at = pretty(c(0, ylim_CI)))
-  axis(side = 1,
-       at = transf_data_conc,
-       labels = data_conc)
-  
-  # Plotting the theoretical curve
-  # cred.int ribbon + lines
-  if (!is.null(spaghetti.CI)) {
-    color <- "gray"
-    color_transparent <- adjustcolor(color, alpha.f = 0.05)
-    by(dataCIm, dataCIm$variable, function(x) {
-      lines(x$curv_conc, x$value, col = color_transparent)
-    })
-  } else {
-    polygon(c(curv_conc, rev(curv_conc)), c(cred.int[["qinf95"]],
-                                            rev(cred.int[["qsup95"]])),
-            col = ribcol, border = NA)
-  }
-  
-  lines(curv_conc, cred.int[["qsup95"]], type = "l", col = cicol, lty = cilty,
-        lwd = cilwd)
-  lines(curv_conc, cred.int[["qinf95"]], type = "l", col = cicol, lty = cilty,
-        lwd = cilwd)
-  
-  # fitted curve
-  lines(curv_conc, curv_resp[, "resp"], col = fitcol,
-        lty = fitlty, lwd = fitlwd, type = "l")
-  
-  # legend
-  if(addlegend)  {
-    legend("bottomleft",
-           lty = c(cilty, fitlty),
-           lwd = c(cilwd, fitlwd),
-           col = c(cicol, fitcol),
-           legend = c("Credible limits", "loglogistic"),
-           bty = "n")
-  }
-}
-
-reproFitPlotGGCredInt <- function(curv_resp, cred.int, spaghetti.CI, dataCIm,
-                                  cicol, cilty, cilwd, valCols, fitlty, fitlwd, ribcol,
-                                  xlab, ylab, main, ylim_CI) {
-  # IC
-  data.three <- data.frame(conc = curv_resp$conc,
-                           qinf95 = cred.int[["qinf95"]],
-                           qsup95 = cred.int[["qsup95"]],
-                           Cred.Lim = "Credible limits")
-  
-  plt_31 <- if (!is.null(spaghetti.CI)) {
-    ggplot(data.three) + geom_line(data = dataCIm, aes(x = curv_conc, y = value,
-                                                       group = variable),
-                                   col = "gray", alpha = 0.05)
-  } else {
-    ggplot(data.three) + geom_ribbon(data = data.three, aes(x = conc,
-                                                            ymin = qinf95,
-                                                            ymax = qsup95),
-                                     fill = ribcol, col = NA,
-                                     alpha = 0.4)
-  }
-  
-  plt_3 <- plt_31 +
-    geom_line(data = data.three, aes(conc, qinf95, color = Cred.Lim),
-              linetype = cilty, size = cilwd) +
-    geom_line(data = data.three, aes(conc, qsup95, color = Cred.Lim),
-              linetype = cilty, size = cilwd) +
-    scale_color_manual("", values = valCols$cols4) +
-    theme_minimal()
-  
-  # plot IC
-  # final plot
-  
-  if (!is.null(spaghetti.CI)) {
-    plt_40 <- ggplot(data.three) +
-      geom_line(data = dataCIm, aes(x = curv_conc, y = value, group = variable),
-                col = "gray", alpha = 0.05)
-  } else {
-    plt_40 <- ggplot(data.three) + geom_ribbon(data = data.three,
-                                               aes(x = conc,
-                                                   ymin = qinf95,
-                                                   ymax = qsup95),
-                                               fill = ribcol,
-                                               col = NA, alpha = 0.4)
-  }
-  
-  plt_4 <- plt_40 +
-    geom_line(data = data.three, aes(conc, qinf95),
-              linetype = cilty, size = cilwd, color = valCols$cols4) +
-    geom_line(data = data.three, aes(conc, qsup95),
-              linetype = cilty, size = cilwd, color = valCols$cols4) +
-    geom_line(aes(conc, resp), curv_resp,
-              linetype = fitlty, size = fitlwd, color = valCols$cols2) +
-    ylim(0, ylim_CI + 0.2) +
-    labs(x = xlab, y = ylab) +
-    ggtitle(main) + theme_minimal()
-  
-  return(list(plt_3 = plt_3,
-              plt_4 = plt_4))
-}
-
-reproFitPlotGG <- function(x, data_conc, transf_data_conc, data_resp,
-                           curv_conc, curv_resp,
-                           cred.int, spaghetti.CI, dataCIm,
-                           xlab, ylab, fitcol, fitlty, fitlwd,
-                           main, addlegend,
-                           cicol, cilty, cilwd, ribcol, log.scale, ylim_CI) {
-  
-  if (Sys.getenv("RSTUDIO") == "") {
-    dev.new() # create a new page plot
-    # when not use RStudio
-  }
-  
-  # dataframes points (data) and curve (curv)
-  # colors
-  valCols <- fCols(curv_resp, fitcol, cicol)
-
-  plt_4 <-
-    reproFitPlotGGCredInt(curv_resp, cred.int, spaghetti.CI, dataCIm,
-                          cicol, cilty, cilwd, valCols, fitlty, fitlwd, ribcol, xlab,
-                          ylab, main, ylim_CI)$plt_4
-  
-  if (addlegend) {
-    
-    # create legends
-    
-    # curve (to create the legend)
-    plt_2 <- ggplot(curv_resp) +
-      geom_line(data = curv_resp, aes(conc, resp, colour = Line),
-                linetype = fitlty, size = fitlwd) +
-      scale_color_manual("", values = valCols$cols2) +
-      theme_minimal()
-    
-    mylegend_2 <- legendGgplotFit(plt_2) # mean line legend
-    
-    plt_5 <- plt_4 + scale_x_continuous(breaks = transf_data_conc,
-                                        labels = data_conc)
-    
-    plt_3 <- reproFitPlotGGCredInt(curv_resp, cred.int, spaghetti.CI, dataCIm,
-                                   cicol, cilty, cilwd, valCols, fitlty,
-                                   fitlwd, ribcol, xlab, ylab, main, ylim_CI)$plt_3
-    
-    mylegend_3 <- legendGgplotFit(plt_3)
-    
-    grid.arrange(plt_5, arrangeGrob(mylegend_2, mylegend_3,
-                                    nrow = 6), ncol = 2,
-                 widths = c(6, 2))
-  }
-  else { # no legend
-    plt_5 <- plt_4 + scale_x_continuous(breaks = transf_data_conc,
-                                        labels = data_conc)
-    return(plt_5)
-  }
-}
-
+#' Plotting method for \code{reproFitTT} objects
+#' 
+#' This is the generic \code{plot} S3 method for the \code{reproFitTT} class.
+#' It plots the concentration-effect fit under target time reproduction
+#' analysis.
+#' 
+#' The fitted curve represents the \strong{estimated reproduction rate} at the target time
+#'  as a function of the chemical compound concentration.
+#' The function plots 95\% credible intervals for the estimated reproduction
+#' rate (by default the grey area around the fitted curve). Typically
+#' a good fit is expected to display a large overlap between the two types of intervals.
+#' If \code{spaghetti = TRUE}, the credible intervals are represented by two dotted
+#' lines limiting the credible band, and a spaghetti plot is added to this band.
+#' It consists of simulated curves drawn with parameter values sampled from
+#' the posterior distribution (10\% of the MCMC samples are randomly
+#' taken for this purpose).
+#'
+#' @param x an object of class \code{reproFitTT}
+#' @param xlab a label for the \eqn{X}-axis, by default \code{Concentration}
+#' @param ylab a label for the \eqn{Y}-axis, by default \code{Nb of offspring per ind/day}
+#' @param main main title for the plot
+#' @param fitcol color of the fitted curve
+#' @param fitlty line type of the fitted curve
+#' @param fitlwd width of the fitted curve
+#' @param spaghetti if \code{TRUE}, the credible interval is represented by 
+#' multiple curves
+#' @param cicol color of the 95\% credible limits
+#' @param cilty line type of the 95\% credible limits
+#' @param cilwd width of the 95\% credible limits
+#' @param ribcol color of the ribbon between lower and upper credible limits.
+#' Transparent if \code{NULL}
+#' @param addlegend if \code{TRUE}, adds a default legend to the plot
+#' @param log.scale if \code{TRUE}, displays \eqn{X}-axis in log-scale
+#' @param style graphical backend, can be \code{'ggplot'} or \code{'generic'}
+#' @param \dots Further arguments to be passed to generic methods
+#' 
+#' @note When \code{style = "generic"}, the function calls the generic function
+#' \code{\link[graphics]{plot}}
+#' @note When \code{style = "ggplot"}, the function returns an object of class
+#'  \code{ggplot}; see function \code{\link[ggplot2]{ggplot}}
+#'
+#' 
+#' @import ggplot2
+#' @import grDevices
+#' @importFrom gridExtra grid.arrange arrangeGrob
+#' @importFrom grid grid.rect gpar
+#' @importFrom graphics plot axis legend lines par points polygon
+#' segments title
+#' @importFrom reshape2 melt
+#'
+#' @keywords plot
+#' 
+#' @return a plot of class \code{ggplot}
+#' 
+#' @export
+plot.reproFitTT <- function(x,
+                            xlab = "Concentration",
+                            ylab = "Nb of offspring per ind/day",
+                            main = NULL,
+                            fitcol = "orange",
+                            fitlty = 1,
+                            fitlwd = 1,
+                            spaghetti = FALSE,
+                            cicol = "orange",
+                            cilty = 2,
+                            cilwd = 1,
+                            ribcol = "grey70",
+                            addlegend = FALSE,
+                            log.scale = FALSE,
+                            style = "ggplot", ...) {
+  # plot the fitted curve estimated by reproFitTT
+  # INPUTS
+  # - x:  reproFitTT object
+  # - xlab : label x
+  # - ylab : label y
+  # - main : main title
+  # - fitcol : color fitted curve
+  # - fitlty : type line fitted curve
+  # - fitlwd : width line fitted curve
+  # - cicol : color ci
+  # - cilty : type line ci
+  # - cilwd : width line ci
+  # - addlegend : boolean
+  # - log.scale : x log option
+  # - style : generic or ggplot
+  # OUTPUT:
+  # - plot of fitted regression
+  
+  # Selection of datapoints that can be displayed given the type of scale
+  sel <- if (log.scale) x$dataTT$conc > 0 else TRUE
+  
+  dataTT <- x$dataTT[sel, ]
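+  # observed response: cumulated number of offspring per individual-day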
+  dataTT$resp <- dataTT$Nreprocumul / dataTT$Nindtime
+  transf_data_conc <- optLogTransform(log.scale, dataTT$conc)
+  
+  # Concentration values used for display in linear scale
+  display.conc <- (function() {
+    x <- optLogTransform(log.scale, dataTT$conc)
+    s <- seq(min(x), max(x), length = 100)
+    if(log.scale) exp(s) else s
+  })()
+  
+  # Possibly log transformed concentration values for display
+  curv_conc <- optLogTransform(log.scale, display.conc)
+  
+  cred.int <- reproMeanCredInt(x, display.conc)
+  
+  spaghetti.CI <- if (spaghetti) { reproSpaghetti(x, display.conc) } else NULL
+  dataCIm <- if (spaghetti) { melt(cbind(curv_conc, spaghetti.CI),
+                                   id.vars = c("curv_conc", "conc"))} else NULL
+  
+  curv_resp <- data.frame(conc = curv_conc, resp = cred.int[["q50"]],
+                          Line = "loglogistic")
+  
+  # ylim
+  ylim_CI <- if (spaghetti) {
+    max(dataCIm$value, cred.int$qsup95)
+  } else {
+    max(cred.int$qsup95)
+  }
+
+  if (style == "generic") {
+    reproFitPlotGenericCredInt(x, dataTT$conc, transf_data_conc, dataTT$resp,
+                               curv_conc, curv_resp,
+                               cred.int, spaghetti.CI, dataCIm,
+                               xlab, ylab, fitcol, fitlty, fitlwd,
+                               main, addlegend,
+                               cicol, cilty, cilwd, ribcol, log.scale, ylim_CI)
+  }
+  else if (style == "ggplot") {
+    reproFitPlotGG(x, dataTT$conc, transf_data_conc, dataTT$resp,
+                   curv_conc, curv_resp,
+                   cred.int, spaghetti.CI, dataCIm,
+                   xlab, ylab, fitcol, fitlty, fitlwd,
+                   main, addlegend,
+                   cicol, cilty, cilwd, ribcol, log.scale, ylim_CI)
+  }
+  else stop("Unknown style")
+}
+
+#' @importFrom stats quantile rgamma
+reproMeanCredInt <- function(fit, x) {
+  # compute credible interval quantiles for the log-logistic model
+  # INPUT:
+  # - fit : object of class reproFitTT
+  # - x : vector of concentrations values (x axis)
+  # OUTPUT:
+  # - ci : credible limits (2.5%, 50% and 97.5% quantiles)
+  
+  mctot <- do.call("rbind", fit$mcmc)
+  k <- nrow(mctot)
+  # parameters
+  d2 <- mctot[, "d"]
+  log10b2 <- mctot[, "log10b"]
+  b2 <- 10^log10b2
+  log10e2 <- mctot[, "log10e"]
+  e2 <- 10^log10e2
+  
+  # quantiles
+  qinf95 = NULL
+  q50 = NULL
+  qsup95 = NULL
+  
+  # poisson
+  if (fit$model.label == "P") {
+    for (i in 1:length(x)) {
+      theomean <- d2 / (1 + (x[i] / e2)^(b2)) # mean curve
+      # 95% CI
+      qinf95[i] <- quantile(theomean, probs = 0.025, na.rm = TRUE)
+      qsup95[i] <- quantile(theomean, probs = 0.975, na.rm = TRUE)
+      q50[i] <- quantile(theomean, probs = 0.5, na.rm = TRUE)
+    }
+  }
+  
+  # gamma poisson
+  else if (fit$model.label == "GP") {
+    # parameters
+    log10omega2 <- mctot[, "log10omega"]
+    omega2 <- 10^(log10omega2)
+    
+    for (i in 1:length(x)) {
+      theomean <- d2 / (1 + (x[i] / e2)^(b2)) # mean curve
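+      # gamma draws with mean 'theomean' (shape/rate = theomean), so 'omega2'
+      # only inflates the variance (gamma-Poisson over-dispersion)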
+      theo <- rgamma(n = k, shape = theomean / omega2, rate = 1 / omega2)
+      # 95% CI
+      qinf95[i] <- quantile(theo, probs = 0.025, na.rm = TRUE)
+      qsup95[i] <- quantile(theo, probs = 0.975, na.rm = TRUE)
+      q50[i] <- quantile(theo, probs = 0.5, na.rm = TRUE)
+    }
+  }
+  # values for cred.int
+  ci <- data.frame(qinf95 = qinf95,
+                   q50 = q50,
+                   qsup95 = qsup95)
+  
+  return(ci)
+}
+
+reproSpaghetti <- function(fit, x) {
+  mctot <- do.call("rbind", fit$mcmc)
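+  # randomly keep 10% of the MCMC iterations to draw the spaghetti curves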
+  sel <- sample(nrow(mctot))[1:ceiling(nrow(mctot) / 10)]
+  k <- nrow(mctot[sel,])
+  # parameters
+  d2 <- mctot[, "d"][sel]
+  log10b2 <- mctot[, "log10b"][sel]
+  b2 <- 10^log10b2
+  log10e2 <- mctot[, "log10e"][sel]
+  e2 <- 10^log10e2
+  if (fit$model.label == "GP") {
+    log10omega2 <- mctot[, "log10omega"][sel]
+    omega2 <- 10^(log10omega2)
+  }
+  
+  # all theoretical curves
+  dtheo <- array(data = NA, dim = c(length(x), length(e2)))
+  if (fit$model.label == "GP") dtheotemp <- dtheo
+  for (i in 1:length(e2)) {
+    if (fit$model.label == "P") {
+      dtheo[, i] <- d2[i] / (1 + (x / e2[i])^(b2[i])) # mean curve
+    }
+    else if (fit$model.label == "GP") {
+      dtheotemp[, i] <- d2[i] / (1 + (x / e2[i])^(b2[i])) # mean curve
+      dtheo[, i] <- rgamma(n = length(x), shape = dtheotemp[, i] / omega2[i],
+                           rate = 1 / omega2[i])
+    }
+  }
+  dtheof <- as.data.frame(cbind(x, dtheo))
+  names(dtheof) <- c("conc", paste0("X", 1:length(sel)))
+  
+  return(dtheof)
+}
+
+#' @importFrom epitools pois.exact
+reproFitPlotGenericCredInt <- function(x, data_conc, transf_data_conc, data_resp,
+                                       curv_conc, curv_resp,
+                                       cred.int, spaghetti.CI, dataCIm,
+                                       xlab, ylab, fitcol, fitlty, fitlwd,
+                                       main, addlegend,
+                                       cicol, cilty, cilwd, ribcol, log.scale, ylim_CI) {
+
+  # plot the fitted curve estimated by reproFitTT
+  # with generic style with credible interval
+  
+  plot(transf_data_conc, data_resp,
+       xlab = xlab,
+       ylab = ylab,
+       main = main,
+       xaxt = "n",
+       yaxt = "n",
+       ylim = c(0, ylim_CI + 0.01),
+       type = "n")
+  
+  # axis
+  axis(side = 2, at = pretty(c(0, ylim_CI)))
+  axis(side = 1,
+       at = transf_data_conc,
+       labels = data_conc)
+  
+  # Plotting the theoretical curve
+  # cred.int ribbon + lines
+  if (!is.null(spaghetti.CI)) {
+    color <- "gray"
+    color_transparent <- adjustcolor(color, alpha.f = 0.05)
+    by(dataCIm, dataCIm$variable, function(x) {
+      lines(x$curv_conc, x$value, col = color_transparent)
+    })
+  } else {
+    polygon(c(curv_conc, rev(curv_conc)), c(cred.int[["qinf95"]],
+                                            rev(cred.int[["qsup95"]])),
+            col = ribcol, border = NA)
+  }
+  
+  lines(curv_conc, cred.int[["qsup95"]], type = "l", col = cicol, lty = cilty,
+        lwd = cilwd)
+  lines(curv_conc, cred.int[["qinf95"]], type = "l", col = cicol, lty = cilty,
+        lwd = cilwd)
+  
+  # fitted curve
+  lines(curv_conc, curv_resp[, "resp"], col = fitcol,
+        lty = fitlty, lwd = fitlwd, type = "l")
+  
+  # legend
+  if (addlegend) {
+    legend("bottomleft",
+           lty = c(cilty, fitlty),
+           lwd = c(cilwd, fitlwd),
+           col = c(cicol, fitcol),
+           legend = c("Credible limits", "loglogistic"),
+           bty = "n")
+  }
+}
+
+reproFitPlotGGCredInt <- function(curv_resp, cred.int, spaghetti.CI, dataCIm,
+                                  cicol, cilty, cilwd, valCols, fitlty, fitlwd, ribcol,
+                                  xlab, ylab, main, ylim_CI) {
+  # credible interval data
+  data.three <- data.frame(conc = curv_resp$conc,
+                           qinf95 = cred.int[["qinf95"]],
+                           qsup95 = cred.int[["qsup95"]],
+                           Cred.Lim = "Credible limits")
+  
+  plt_31 <- if (!is.null(spaghetti.CI)) {
+    ggplot(data.three) + geom_line(data = dataCIm, aes(x = curv_conc, y = value,
+                                                       group = variable),
+                                   col = "gray", alpha = 0.05)
+  } else {
+    ggplot(data.three) + geom_ribbon(data = data.three, aes(x = conc,
+                                                            ymin = qinf95,
+                                                            ymax = qsup95),
+                                     fill = ribcol, col = NA,
+                                     alpha = 0.4)
+  }
+  
+  plt_3 <- plt_31 +
+    geom_line(data = data.three, aes(conc, qinf95, color = Cred.Lim),
+              linetype = cilty, size = cilwd) +
+    geom_line(data = data.three, aes(conc, qsup95, color = Cred.Lim),
+              linetype = cilty, size = cilwd) +
+    scale_color_manual("", values = valCols$cols4) +
+    theme_minimal()
+  
+  # build the final plot with its credible interval
+  
+  if (!is.null(spaghetti.CI)) {
+    plt_40 <- ggplot(data.three) +
+      geom_line(data = dataCIm, aes(x = curv_conc, y = value, group = variable),
+                col = "gray", alpha = 0.05)
+  } else {
+    plt_40 <- ggplot(data.three) + geom_ribbon(data = data.three,
+                                               aes(x = conc,
+                                                   ymin = qinf95,
+                                                   ymax = qsup95),
+                                               fill = ribcol,
+                                               col = NA, alpha = 0.4)
+  }
+  
+  plt_4 <- plt_40 +
+    geom_line(data = data.three, aes(conc, qinf95),
+              linetype = cilty, size = cilwd, color = valCols$cols4) +
+    geom_line(data = data.three, aes(conc, qsup95),
+              linetype = cilty, size = cilwd, color = valCols$cols4) +
+    geom_line(aes(conc, resp), curv_resp,
+              linetype = fitlty, size = fitlwd, color = valCols$cols2) +
+    ylim(0, ylim_CI + 0.2) +
+    labs(x = xlab, y = ylab) +
+    ggtitle(main) + theme_minimal()
+  
+  return(list(plt_3 = plt_3,
+              plt_4 = plt_4))
+}
+
+reproFitPlotGG <- function(x, data_conc, transf_data_conc, data_resp,
+                           curv_conc, curv_resp,
+                           cred.int, spaghetti.CI, dataCIm,
+                           xlab, ylab, fitcol, fitlty, fitlwd,
+                           main, addlegend,
+                           cicol, cilty, cilwd, ribcol, log.scale, ylim_CI) {
+  
+  if (Sys.getenv("RSTUDIO") == "") {
+    dev.new() # create a new page plot
+    # when not use RStudio
+  }
+  
+  # colors for the fitted curve and credible limits
+  valCols <- fCols(curv_resp, fitcol, cicol)
+
+  plt_4 <-
+    reproFitPlotGGCredInt(curv_resp, cred.int, spaghetti.CI, dataCIm,
+                          cicol, cilty, cilwd, valCols, fitlty, fitlwd, ribcol, xlab,
+                          ylab, main, ylim_CI)$plt_4
+  
+  if (addlegend) {
+    
+    # create legends
+    
+    # curve (to create the legend)
+    plt_2 <- ggplot(curv_resp) +
+      geom_line(data = curv_resp, aes(conc, resp, colour = Line),
+                linetype = fitlty, size = fitlwd) +
+      scale_color_manual("", values = valCols$cols2) +
+      theme_minimal()
+    
+    mylegend_2 <- legendGgplotFit(plt_2) # mean line legend
+    
+    plt_5 <- plt_4 + scale_x_continuous(breaks = transf_data_conc,
+                                        labels = data_conc)
+    
+    plt_3 <- reproFitPlotGGCredInt(curv_resp, cred.int, spaghetti.CI, dataCIm,
+                                   cicol, cilty, cilwd, valCols, fitlty,
+                                   fitlwd, ribcol, xlab, ylab, main, ylim_CI)$plt_3
+    
+    mylegend_3 <- legendGgplotFit(plt_3)
+    
+    grid.arrange(plt_5, arrangeGrob(mylegend_2, mylegend_3,
+                                    nrow = 6), ncol = 2,
+                 widths = c(6, 2))
+  }
+  else { # no legend
+    plt_5 <- plt_4 + scale_x_continuous(breaks = transf_data_conc,
+                                        labels = data_conc)
+    return(plt_5)
+  }
+}
+
diff --git a/R/plot.survDataCstExp.R b/R/plot.survDataCstExp.R
index f5de0e72e363cf847efefb43b7cec8dd31c5a667..2ba54d84caec73097786aaf9f9af274aba8c3fac 100644
--- a/R/plot.survDataCstExp.R
+++ b/R/plot.survDataCstExp.R
@@ -1,297 +1,286 @@
-#' Plotting method for \code{survData} objects
-#'
-#' This is the generic \code{plot} S3 method for the \code{survData} class.
-#' It plots the number of survivors as a function of time.
-#'
-#' @param x an object of class \code{survData}
-#' @param xlab a label for the \eqn{X}-axis, by default \code{Time}
-#' @param ylab a label for the \eqn{Y}-axis, by default \code{Number of survivors}
-#' @param main main title for the plot
-#' @param concentration a numeric value corresponding to some concentration(s) in
-#' \code{data}. If \code{concentration = NULL}, draws a plot for each concentration
-#' @param style graphical backend, can be \code{'generic'} or \code{'ggplot'}
-#' @param pool.replicate if \code{TRUE}, the datapoints of each replicate are
-#' summed for a same concentration
-#' @param addlegend if \code{TRUE}, adds a default legend to the plot
-#' @param remove.someLabels if \code{TRUE}, removes 3/4 of \eqn{X}-axis labels in
-#' \code{'ggplot'} style to avoid label overlap
-#' @param \dots Further arguments to be passed to generic methods
-#'
-#' @note When \code{style = "ggplot"} (default), the function calls function
-#' \code{\link[ggplot2]{ggplot}} and returns an object of class \code{ggplot}.
-#'
-#' @keywords plot
-#' 
-#' @return a plot of class \code{ggplot}
-#'
-#' @examples
-#' # (1) Load the data
-#' data(zinc)
-#' zinc <- survData(zinc)
-#'
-#' # (2) Plot survival data with a ggplot style
-#' plot(zinc)
-#'
-#' # (3) Plot the survival data for one specific concentration
-#' plot(zinc, concentration = 0.66)
-#'
-#' @import ggplot2
-#' @import grDevices
-#' @import dplyr
-#' @importFrom graphics plot axis legend lines par points title
-#' @importFrom methods is
-#' @importFrom stats aggregate
-#'
-#' @export
-plot.survDataCstExp <- function(x,
-                                xlab = "Time",
-                                ylab = "Number of survivors",
-                                main = NULL,
-                                concentration = NULL,
-                                style = "ggplot",
-                                pool.replicate = FALSE,
-                                addlegend = FALSE,
-                                remove.someLabels = FALSE, ...) {
-
-  if (!is(x,"survDataCstExp"))
-    stop("plot.survData: object of class survData expected")
-
-  if (style == "generic" && remove.someLabels)
-    warning("'remove.someLabels' argument is valid only in 'ggplot' style.",
-            call. = FALSE)
-
-  if (is.null(concentration) && addlegend)
-    warning("'addlegend' argument is valid only when 'concentration' is not null.",
-            call. = FALSE)
-
-  if (pool.replicate) {
-    # agregate by sum of replicate
-    x <- cbind(aggregate(Nsurv ~ time + conc, x, sum),
-               replicate = 1)
-  }
-
-  x <- as.data.frame(x)
-
-  if (is.null(concentration)) {
-    survDataPlotFull(x, xlab, ylab, style, remove.someLabels)
-  }  else {
-    survDataPlotFixedConc(x, xlab, ylab, main, concentration,
-                          style, addlegend, remove.someLabels)
-  }
-}
-
-
-# [ReplicateIndex(data)] builds a list of indices, each one named after
-# a replicate of [data], thus providing a dictionary from replicate names to
-# integer keys.
-ReplicateIndex <- function(data) {
-  replicate <- unique(data$replicate)
-  r <- as.list(seq(1, length(replicate)))
-  names(r) <- as.character(replicate)
-  return(r)
-}
-
-
-# General full plot: one subplot for each concentration, and one color for
-# each replicate (for generic graphics)
-dataPlotFullGeneric <- function(data, xlab, ylab, resp) {
-  replicate.index <- ReplicateIndex(data)
-
-  # creation of a vector of colors
-  colors <- rainbow(length(unique(data$replicate)))
-  pchs <- as.numeric(unique(data$replicate))
-  # split of the graphical window in subplots
-  oldpar <- par(no.readonly = TRUE)
-  on.exit(par(oldpar)) 
-  par(mfrow = plotMatrixGeometry(length(unique(data$conc))))
-
-  by(data, data$conc, function(x) {
-    x <- as.data.frame(x)
-    # background
-    plot(x$time, rep(0, length(x$time)),
-         xlab = xlab,
-         ylab = ylab,
-         ylim = c(0, max(x[, resp])),
-         type = "n",
-         col = 'white',
-         xaxt = "n",
-         yaxt = "n")
-
-    # axis
-    axis(side = 1, at = sort(unique(as.data.frame(x)[, "time"])))
-    axis(side = 2, at = unique(round(pretty(c(0, max(x[, resp]))))))
-
-
-    # lines and points
-    by(x, x$replicate, function(y) {
-      index <- replicate.index[[y$replicate[1]]]
-      lines(y$time, y[, resp],
-            type = "l",
-            col = colors[index])
-      points(y$time, y[, resp],
-             pch = pchs[index],
-             col = colors[index])
-    })
-
-    # title
-    title(paste("Conc: ", unique(x$conc), sep = ""))
-  })
-
-  par(mfrow = c(1, 1))
-}
-
-# general full plot (ggplot variant): one subplot for each concentration,
-# and one color for each replicate
-#' @import ggplot2
-dataPlotFullGG <- function(data, xlab, ylab, resp, remove.someLabels) {
-
-  data <- as.data.frame(data)
-  time = NULL
-  Nsurv = NULL
-
-  data$response <- data[, resp]
-
-  # create ggplot object Nsurv / time / replicate / conc
-  fg <- ggplot(data = data, aes(time, response, colour = factor(replicate))) +
-        geom_point() +
-        geom_line()  +
-        labs(x = xlab, y = ylab) +
-        facet_wrap(~conc, ncol = 2) +
-        scale_x_continuous(breaks = unique(data$time),
-                           labels = if (remove.someLabels) {
-                             exclude_labels(unique(data$time))
-                           } else {
-                             unique(data$time)
-                           }
-        ) +
-        scale_y_continuous(breaks = unique(round(pretty(c(0, max(data$response)))))) +
-        expand_limits(x = 0, y = 0) +
-        theme_minimal()
-
-   fd <- fg + theme(legend.position = "none") # remove legend
-
-   return(fd)
-
-}
-
-dataPlotFull <- function(data, xlab, ylab, resp, style = "generic",
-                         remove.someLabels = FALSE) {
-
-  if (missing(xlab)) xlab <- "Time"
-
-  if (style == "generic")
-    dataPlotFullGeneric(data, xlab, ylab, resp)
-  else if (style == "ggplot")
-    dataPlotFullGG(data, xlab, ylab, resp, remove.someLabels)
-  else stop("Unknown plot style")
-}
-
-survDataPlotFull <- function(data, xlab, ylab,
-                             style = "ggplot",
-                             remove.someLabels = FALSE) {
-  dataPlotFull(data, xlab, ylab, "Nsurv", style, remove.someLabels)
-}
-
-dataPlotFixedConc <- function(x,
-                              xlab,
-                              ylab,
-                              main,
-                              resp,
-                              concentration,
-                              style = "generic",
-                              addlegend = FALSE,
-                              remove.someLabels = FALSE) {
-
-  x <- as.data.frame(x) # x is interpreted as a tibble
-
-    if (missing(xlab)) xlab <- "Time"
-
-  legend.position <- ifelse(resp == "Nsurv", "bottomleft", "topleft")
-
-  # check concentration value
-  if (!concentration %in% x$conc)
-    stop("The argument [concentration] should correspond to one of the tested concentrations")
-
-  # select the concentration
-  x <- filter(x, x$conc == concentration)
-
-  # vector color
-  x$color <- as.numeric(as.factor(x$replicate))
-
-  if (style == "generic") {
-    plot(x$time, x[, resp],
-         type = "n",
-         xaxt = "n",
-         yaxt = "n",
-         main = main,
-         xlim = range(x$time),
-         ylim = c(0, max(x[, resp])),
-         xlab = xlab,
-         ylab = ylab)
-
-    # one line by replicate
-    by(x, list(x$replicate),
-       function(x) {
-         lines(x$time, x[,resp], # lines
-               col = x$color)
-         points(x$time, x[,resp], # points
-                pch = 16,
-                col = x$color)
-       })
-
-    # axis
-    axis(side = 1, at = sort(unique(x[, "time"])))
-    axis(side = 2, at = unique(round(pretty(c(0, max(x[, resp]))))))
-
-    if (addlegend && !length(unique(x$replicate)) == 1) {
-      legend(legend.position, legend = unique(x$replicate) ,
-             col = unique(x$color),
-             pch = 16,
-             lty = 1)
-    }
-  }
-  else if (style == "ggplot") {
-    x$response <- x[,resp]
-
-    if (length(unique(x$replicate)) == 1) {
-      df <- ggplot(x, aes(x = time, y = response))
-    } else {
-      df <- ggplot(x, aes(x = time, y = response,
-                          color = factor(replicate),
-                          group = replicate))
-    }
-    fd <- df + geom_line() + geom_point() + ggtitle(main) +
-      theme_minimal() +
-      labs(x = xlab,
-           y = ylab) +
-      scale_color_hue("Replicate") +
-      scale_x_continuous(breaks = unique(x$time),
-                         labels = if (remove.someLabels) {
-                           exclude_labels(unique(x$time))
-                         } else {
-                           unique(x$time)
-                         }) +
-      scale_y_continuous(breaks = unique(round(pretty(c(0, max(x$response)))))) +
-      expand_limits(x = 0, y = 0)
-
-    if (addlegend) {# only if pool.replicate == FALSE
-      fd
-    } else {
-      fd + theme(legend.position = "none") # remove legend
-    }
-  }
-  else stop("Unknown plot style")
-}
-
-survDataPlotFixedConc <- function(x,
-                                  xlab,
-                                  ylab,
-                                  main,
-                                  concentration,
-                                  style = "generic",
-                                  addlegend = FALSE,
-                                  remove.someLabels = FALSE) {
-
-  dataPlotFixedConc(x, xlab, ylab, main, "Nsurv", concentration,
-                    style, addlegend, remove.someLabels)
-}
+#' Plotting method for \code{survData} objects
+#'
+#' This is the generic \code{plot} S3 method for the \code{survData} class.
+#' It plots the number of survivors as a function of time.
+#'
+#' @param x an object of class \code{survData}
+#' @param xlab a label for the \eqn{X}-axis, by default \code{Time}
+#' @param ylab a label for the \eqn{Y}-axis, by default \code{Number of survivors}
+#' @param main main title for the plot
+#' @param concentration a numeric value corresponding to some concentration(s) in
+#' \code{data}. If \code{concentration = NULL}, draws a plot for each concentration
+#' @param style graphical backend, can be \code{'generic'} or \code{'ggplot'}
+#' @param pool.replicate if \code{TRUE}, the datapoints of each replicate are
+#' summed for each concentration
+#' @param addlegend if \code{TRUE}, adds a default legend to the plot
+#' @param remove.someLabels if \code{TRUE}, removes 3/4 of \eqn{X}-axis labels in
+#' \code{'ggplot'} style to avoid label overlap
+#' @param \dots Further arguments to be passed to generic methods
+#'
+#' @note When \code{style = "ggplot"} (default), the function calls function
+#' \code{\link[ggplot2]{ggplot}} and returns an object of class \code{ggplot}.
+#'
+#' @keywords plot
+#' 
+#' @return a plot of class \code{ggplot}
+#'
+#' @import ggplot2
+#' @import grDevices
+#' @import dplyr
+#' @importFrom graphics plot axis legend lines par points title
+#' @importFrom methods is
+#' @importFrom stats aggregate
+#'
+#' @export
+plot.survDataCstExp <- function(x,
+                                xlab = "Time",
+                                ylab = "Number of survivors",
+                                main = NULL,
+                                concentration = NULL,
+                                style = "ggplot",
+                                pool.replicate = FALSE,
+                                addlegend = FALSE,
+                                remove.someLabels = FALSE, ...) {
+
+  if (!is(x,"survDataCstExp"))
+    stop("plot.survData: object of class survData expected")
+
+  if (style == "generic" && remove.someLabels)
+    warning("'remove.someLabels' argument is valid only in 'ggplot' style.",
+            call. = FALSE)
+
+  if (is.null(concentration) && addlegend)
+    warning("'addlegend' argument is valid only when 'concentration' is not null.",
+            call. = FALSE)
+
+  if (pool.replicate) {
+    # aggregate by summing over replicates
+    x <- cbind(aggregate(Nsurv ~ time + conc, x, sum),
+               replicate = 1)
+  }
+
+  x <- as.data.frame(x)
+
+  if (is.null(concentration)) {
+    survDataPlotFull(x, xlab, ylab, style, remove.someLabels)
+  }  else {
+    survDataPlotFixedConc(x, xlab, ylab, main, concentration,
+                          style, addlegend, remove.someLabels)
+  }
+}
+
+
+# [ReplicateIndex(data)] builds a list of indices, each one named after
+# a replicate of [data], thus providing a dictionary from replicate names to
+# integer keys.
+ReplicateIndex <- function(data) {
+  replicate <- unique(data$replicate)
+  r <- as.list(seq(1, length(replicate)))
+  names(r) <- as.character(replicate)
+  return(r)
+}
+
+
+# General full plot: one subplot for each concentration, and one color for
+# each replicate (for generic graphics)
+dataPlotFullGeneric <- function(data, xlab, ylab, resp) {
+  replicate.index <- ReplicateIndex(data)
+
+  # creation of a vector of colors
+  colors <- rainbow(length(unique(data$replicate)))
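+  # one plotting symbol per replicate (assumes replicate labels are numeric
+  # or a factor; plain character labels would coerce to NA)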
+  pchs <- as.numeric(unique(data$replicate))
+  # split of the graphical window in subplots
+  oldpar <- par(no.readonly = TRUE)
+  on.exit(par(oldpar)) 
+  par(mfrow = plotMatrixGeometry(length(unique(data$conc))))
+
+  by(data, data$conc, function(x) {
+    x <- as.data.frame(x)
+    # background
+    plot(x$time, rep(0, length(x$time)),
+         xlab = xlab,
+         ylab = ylab,
+         ylim = c(0, max(x[, resp])),
+         type = "n",
+         col = 'white',
+         xaxt = "n",
+         yaxt = "n")
+
+    # axis
+    axis(side = 1, at = sort(unique(as.data.frame(x)[, "time"])))
+    axis(side = 2, at = unique(round(pretty(c(0, max(x[, resp]))))))
+
+
+    # lines and points
+    by(x, x$replicate, function(y) {
+      index <- replicate.index[[y$replicate[1]]]
+      lines(y$time, y[, resp],
+            type = "l",
+            col = colors[index])
+      points(y$time, y[, resp],
+             pch = pchs[index],
+             col = colors[index])
+    })
+
+    # title
+    title(paste("Conc: ", unique(x$conc), sep = ""))
+  })
+
+  par(mfrow = c(1, 1))
+}
+
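+# Note: plotMatrixGeometry() is a package helper; from its use in
+# par(mfrow = ...) above, it returns an (nrow, ncol) layout suited to the
+# number of concentration panels.
+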
+# general full plot (ggplot variant): one subplot for each concentration,
+# and one color for each replicate
+#' @import ggplot2
+dataPlotFullGG <- function(data, xlab, ylab, resp, remove.someLabels) {
+
+  data <- as.data.frame(data)
+  # dummy bindings to avoid R CMD check notes on non-standard evaluation
+  time <- NULL
+  Nsurv <- NULL
+
+  data$response <- data[, resp]
+
+  # create ggplot object Nsurv / time / replicate / conc
+  fg <- ggplot(data = data, aes(time, response, colour = factor(replicate))) +
+        geom_point() +
+        geom_line()  +
+        labs(x = xlab, y = ylab) +
+        facet_wrap(~conc, ncol = 2) +
+        scale_x_continuous(breaks = unique(data$time),
+                           labels = if (remove.someLabels) {
+                             exclude_labels(unique(data$time))
+                           } else {
+                             unique(data$time)
+                           }
+        ) +
+        scale_y_continuous(breaks = unique(round(pretty(c(0, max(data$response)))))) +
+        expand_limits(x = 0, y = 0) +
+        theme_minimal()
+
+  fd <- fg + theme(legend.position = "none") # remove legend
+
+  return(fd)
+
+}
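+
+# Note: exclude_labels() is a package helper assumed to blank out part of the
+# time labels (while keeping every break position) so that dense x-axes stay
+# readable, as announced for 'remove.someLabels' in the roxygen header.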
+
+dataPlotFull <- function(data, xlab, ylab, resp, style = "generic",
+                         remove.someLabels = FALSE) {
+
+  if (missing(xlab)) xlab <- "Time"
+
+  if (style == "generic")
+    dataPlotFullGeneric(data, xlab, ylab, resp)
+  else if (style == "ggplot")
+    dataPlotFullGG(data, xlab, ylab, resp, remove.someLabels)
+  else stop("Unknown plot style")
+}
+
+survDataPlotFull <- function(data, xlab, ylab,
+                             style = "ggplot",
+                             remove.someLabels = FALSE) {
+  dataPlotFull(data, xlab, ylab, "Nsurv", style, remove.someLabels)
+}
+
+dataPlotFixedConc <- function(x,
+                              xlab,
+                              ylab,
+                              main,
+                              resp,
+                              concentration,
+                              style = "generic",
+                              addlegend = FALSE,
+                              remove.someLabels = FALSE) {
+
+  x <- as.data.frame(x) # drop any tibble class so that x[, col] returns vectors
+
+  if (missing(xlab)) xlab <- "Time"
+
+  legend.position <- ifelse(resp == "Nsurv", "bottomleft", "topleft")
+
+  # check concentration value
+  if (!concentration %in% x$conc)
+    stop("The argument [concentration] should correspond to one of the tested concentrations")
+
+  # select the concentration
+  x <- filter(x, x$conc == concentration)
+
+  # vector color
+  x$color <- as.numeric(as.factor(x$replicate))
+
+  if (style == "generic") {
+    plot(x$time, x[, resp],
+         type = "n",
+         xaxt = "n",
+         yaxt = "n",
+         main = main,
+         xlim = range(x$time),
+         ylim = c(0, max(x[, resp])),
+         xlab = xlab,
+         ylab = ylab)
+
+    # one line by replicate
+    by(x, list(x$replicate),
+       function(x) {
+         lines(x$time, x[,resp], # lines
+               col = x$color)
+         points(x$time, x[,resp], # points
+                pch = 16,
+                col = x$color)
+       })
+
+    # axis
+    axis(side = 1, at = sort(unique(x[, "time"])))
+    axis(side = 2, at = unique(round(pretty(c(0, max(x[, resp]))))))
+
+    if (addlegend && length(unique(x$replicate)) != 1) {
+      legend(legend.position, legend = unique(x$replicate),
+             col = unique(x$color),
+             pch = 16,
+             lty = 1)
+    }
+  }
+  else if (style == "ggplot") {
+    x$response <- x[,resp]
+
+    if (length(unique(x$replicate)) == 1) {
+      df <- ggplot(x, aes(x = time, y = response))
+    } else {
+      df <- ggplot(x, aes(x = time, y = response,
+                          color = factor(replicate),
+                          group = replicate))
+    }
+    fd <- df + geom_line() + geom_point() + ggtitle(main) +
+      theme_minimal() +
+      labs(x = xlab,
+           y = ylab) +
+      scale_color_hue("Replicate") +
+      scale_x_continuous(breaks = unique(x$time),
+                         labels = if (remove.someLabels) {
+                           exclude_labels(unique(x$time))
+                         } else {
+                           unique(x$time)
+                         }) +
+      scale_y_continuous(breaks = unique(round(pretty(c(0, max(x$response)))))) +
+      expand_limits(x = 0, y = 0)
+
+    if (addlegend) { # only relevant when pool.replicate == FALSE
+      fd
+    } else {
+      fd + theme(legend.position = "none") # remove legend
+    }
+  }
+  else stop("Unknown plot style")
+}
+
+survDataPlotFixedConc <- function(x,
+                                  xlab,
+                                  ylab,
+                                  main,
+                                  concentration,
+                                  style = "generic",
+                                  addlegend = FALSE,
+                                  remove.someLabels = FALSE) {
+
+  dataPlotFixedConc(x, xlab, ylab, main, "Nsurv", concentration,
+                    style, addlegend, remove.someLabels)
+}
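+
+# Note: plot.survDataCstExp() dispatches to survDataPlotFull() when no
+# concentration is given and to survDataPlotFixedConc() otherwise; both are
+# thin wrappers that bind resp = "Nsurv" before calling the dataPlot* helpers.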
diff --git a/R/plot.survFitPredict.R b/R/plot.survFitPredict.R
index fac221e91df7a728b8463442be1bd4f9107e7048..f8667b74e6638acb075c94f8a3219e09c693de40 100644
--- a/R/plot.survFitPredict.R
+++ b/R/plot.survFitPredict.R
@@ -1,120 +1,96 @@
-#' Plotting method for \code{survFitPredict} objects
-#'
-#' This is the generic \code{plot} S3 method for the
-#' \code{survFitPredict}.  It plots the predicted survival probability for each
-#' concentration of the chemical compound in the provided dataset.
-#'
-#' The fitted curves represent the \strong{predicted survival probability} as a function
-#' of time for each concentration.
-#' The function plots both the 95\% credible band and the predicted survival
-#' probability over time.
-#' If \code{spaghetti = TRUE}, the credible intervals are represented by two
-#' dotted lines limiting the credible band, and a spaghetti plot is added to this band.
-#' This spaghetti plot consists of the representation of simulated curves using parameter values
-#' sampled in the posterior distribution (10\% of the MCMC chains are randomly
-#' taken for this sample).
-#'
-#' @param x An object of class \code{survFitPredict}.
-#' @param xlab A label for the \eqn{X}-axis, by default \code{Time}.
-#' @param ylab A label for the \eqn{Y}-axis, by default \code{Survival probability}.
-#' @param main A main title for the plot.
-#' @param spaghetti If \code{TRUE}, draws a set of survival curves using
-#' parameters drawn from the posterior distribution
-#' @param one.plot if \code{TRUE}, draws all the estimated curves in
-#' one plot instead of one plot per concentration.
-#' @param mcmc_size A numerical value refering by default to the size of the mcmc in object \code{survFitPredict}.
-#'  This option is specific to \code{survFitPredict} objects for which computing time may be long.
-#'  \code{mcmc_size} can be used to reduce the number of mcmc samples in order to speed up
-#'  the computation.
-#'  
-#' @param \dots Further arguments to be passed to generic methods.
-#'
-#' @keywords plot
-#' 
-#' @return a plot of class \code{ggplot}
-#' 
-#' @examples 
-#'
-#' # (1) Load the survival data
-#' data("propiconazole_pulse_exposure")
-#'
-#' # (2) Create an object of class "survData"
-#' dataset <- survData(propiconazole_pulse_exposure)
-#'
-#' \donttest{
-#' # (3) Run the survFit function
-#' out <- survFit(dataset , model_type = "SD")
-#'
-#' # (4) Create a new data table for prediction
-#' data_4prediction <- data.frame(time = 1:10, conc = c(0,5,5,5,0,0,5,5,5,5),
-#'  replicate= rep("predict", 10))
-#'
-#' # (5) Predict on a new dataset
-#' predict_out <- predict(out, data_predict = data_4prediction, spaghetti = TRUE)
-#'
-#' # (6) Plot the predicted curve
-#' plot(predict_out)
-#' plot(predict_out, spaghetti = TRUE)
-#' }
-#' 
-#' @export
-#' 
-#' @importFrom tidyr gather
-#'
-plot.survFitPredict <- function(x,
-                               xlab = "Time",
-                               ylab = "Survival probability",
-                               main = NULL,
-                               spaghetti = FALSE,
-                               one.plot = FALSE,
-                               mcmc_size = NULL,
-                               ...) {
-
-  df_prediction <-  x$df_quantile
-  df_spaghetti <-  x$df_spaghetti
-
-  # Plot
-  plt <- ggplot() +
-    theme_minimal() +
-    scale_x_continuous(name = xlab) +
-    scale_y_continuous(name = ylab,
-                       limits = c(0,1)) +
-    theme(legend.position = "top")
-  
-  # spaghetti
-  if(spaghetti == TRUE){
-    
-    df_spaghetti_gather <- df_spaghetti %>%
-      tidyr::gather(survRate_key, survRate_value, -c(time,conc,replicate))
-    
-    plt <- plt +
-      geom_line(data = df_spaghetti_gather,
-                aes(x = time, y = survRate_value, group = interaction(survRate_key, replicate)),
-                alpha = 0.2) +
-      geom_line(data = df_prediction,
-                aes(x = time, y= qinf95, group = replicate),
-                color = "orange", linetype = 2) +
-      geom_line(data = df_prediction,
-                aes(x = time, y = qsup95, group = replicate),
-                color = "orange", linetype = 2)
-  }
-  if(spaghetti != TRUE){
-    plt <- plt + 
-      geom_ribbon(data = df_prediction,
-                  aes(x = time, ymin = qinf95,ymax = qsup95, group = replicate),
-                  fill = "grey70", alpha = 0.4)
-  }
-  # Prediction
-  plt <- plt +
-    geom_line(data = df_prediction,
-              aes(x = time, y = q50, group = replicate),
-              col="orange", size = 1)
-
-  # facetting
-  if(one.plot == FALSE){
-    plt <- plt + facet_wrap(~ replicate)
-  }  
-  
-  return(plt)
-}
-
+#' Plotting method for \code{survFitPredict} objects
+#'
+#' This is the generic \code{plot} S3 method for the
+#' \code{survFitPredict} class. It plots the predicted survival probability for each
+#' concentration of the chemical compound in the provided dataset.
+#'
+#' The fitted curves represent the \strong{predicted survival probability} as a function
+#' of time for each concentration.
+#' The function plots both the 95\% credible band and the predicted survival
+#' probability over time.
+#' If \code{spaghetti = TRUE}, the credible intervals are represented by two
+#' dotted lines limiting the credible band, and a spaghetti plot is added to this band.
+#' This spaghetti plot consists of simulated curves drawn with parameter values
+#' sampled from the posterior distribution (10\% of the MCMC samples are
+#' randomly selected for this purpose).
+#'
+#' @param x An object of class \code{survFitPredict}.
+#' @param xlab A label for the \eqn{X}-axis, by default \code{Time}.
+#' @param ylab A label for the \eqn{Y}-axis, by default \code{Survival probability}.
+#' @param main A main title for the plot.
+#' @param spaghetti If \code{TRUE}, draws a set of survival curves using
+#' parameters drawn from the posterior distribution
+#' @param one.plot if \code{TRUE}, draws all the estimated curves in
+#' one plot instead of one plot per concentration.
+#' @param mcmc_size A numerical value referring, by default, to the size of the MCMC chains
+#'  in the \code{survFitPredict} object.
+#'  This option is specific to \code{survFitPredict} objects, for which computing time may be long.
+#'  \code{mcmc_size} can be used to reduce the number of MCMC samples in order to speed up
+#'  the computation.
+#'  
+#' @param \dots Further arguments to be passed to generic methods.
+#'
+#' @keywords plot
+#' 
+#' @return a plot of class \code{ggplot}
+#' 
+#' @export
+#' 
+#' @importFrom tidyr gather
+#'
+plot.survFitPredict <- function(x,
+                               xlab = "Time",
+                               ylab = "Survival probability",
+                               main = NULL,
+                               spaghetti = FALSE,
+                               one.plot = FALSE,
+                               mcmc_size = NULL,
+                               ...) {
+
+  df_prediction <-  x$df_quantile
+  df_spaghetti <-  x$df_spaghetti
+
+  # Plot
+  plt <- ggplot() +
+    theme_minimal() +
+    scale_x_continuous(name = xlab) +
+    scale_y_continuous(name = ylab,
+                       limits = c(0,1)) +
+    theme(legend.position = "top")
+  
+  # spaghetti
+  if(spaghetti == TRUE){
+    
+    df_spaghetti_gather <- df_spaghetti %>%
+      tidyr::gather(survRate_key, survRate_value, -c(time,conc,replicate))
+    
+    plt <- plt +
+      geom_line(data = df_spaghetti_gather,
+                aes(x = time, y = survRate_value, group = interaction(survRate_key, replicate)),
+                alpha = 0.2) +
+      geom_line(data = df_prediction,
+                aes(x = time, y = qinf95, group = replicate),
+                color = "orange", linetype = 2) +
+      geom_line(data = df_prediction,
+                aes(x = time, y = qsup95, group = replicate),
+                color = "orange", linetype = 2)
+  }
+  else {
+    plt <- plt + 
+      geom_ribbon(data = df_prediction,
+                  aes(x = time, ymin = qinf95,ymax = qsup95, group = replicate),
+                  fill = "grey70", alpha = 0.4)
+  }
+  # Prediction
+  plt <- plt +
+    geom_line(data = df_prediction,
+              aes(x = time, y = q50, group = replicate),
+              col="orange", size = 1)
+
+  # faceting
+  if(one.plot == FALSE){
+    plt <- plt + facet_wrap(~ replicate)
+  }  
+  
+  return(plt)
+}
+
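+# A minimal usage sketch (not run), based on the package's
+# 'propiconazole_pulse_exposure' dataset:
+#   dat <- survData(propiconazole_pulse_exposure)
+#   fit <- survFit(dat, model_type = "SD")
+#   new_profile <- data.frame(time = 1:10,
+#                             conc = c(0, 5, 5, 5, 0, 0, 5, 5, 5, 5),
+#                             replicate = rep("predict", 10))
+#   pred <- predict(fit, data_predict = new_profile, spaghetti = TRUE)
+#   plot(pred)                   # credible band drawn as a grey ribbon
+#   plot(pred, spaghetti = TRUE) # band drawn as spaghetti with dotted limits
+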
diff --git a/R/plot.survFitTKTD.R b/R/plot.survFitTKTD.R
index 505e03e1bb072ccace16bc38127d585206766cfc..92d16d9106ca54325fee6d39795ebbef72760eea 100644
--- a/R/plot.survFitTKTD.R
+++ b/R/plot.survFitTKTD.R
@@ -42,29 +42,6 @@
 #' 
 #' @return a plot of class \code{ggplot}
 #' 
-#' @examples
-#' 
-#' # (1) Load the survival data
-#' data(propiconazole)
-#' 
-#' # (2) Create an object of class "survData"
-#' dataset <- survData(propiconazole)
-#' 
-#' \donttest{
-#' # (3) Run the survFitTKTD function ('SD' model only)
-#' out <- survFitTKTD(dataset)
-#'
-#' # (4) Plot the fitted curves in one plot 
-#' plot(out)
-#'
-#' # (5) Plot one fitted curve per concentration with credible limits as
-#' # spaghetti, data and confidence intervals
-#' # and with a ggplot style
-#' plot(out, spaghetti = TRUE , adddata = TRUE, one.plot = FALSE,
-#'      style = "ggplot")
-#'
-#' }
-#' 
 #' @export
 #' 
 #' @import ggplot2
diff --git a/R/plot.survFitTT.R b/R/plot.survFitTT.R
index d23d86cd17ca5d6674d3b337c9367037a47185e6..216fe9d809f4a72d6bfe6496a7be4f45160fb193 100644
--- a/R/plot.survFitTT.R
+++ b/R/plot.survFitTT.R
@@ -1,504 +1,481 @@
-#' Plotting method for \code{survFitTT} objects
-#'
-#' This is the generic \code{plot} S3 method for the \code{survFitTT} class. It
-#' plots concentration-response fit under target time survival analysis.
-#'
-#' The fitted curve represents the \strong{estimated survival probability} at
-#' the target time as a function of the concentration of chemical compound;
-#' When \code{adddata = TRUE} the black dots depict the \strong{observed survival
-#' probability} at each tested concentration. Note that since our model does not take
-#' inter-replicate variability into consideration, replicates are systematically
-#' pooled in this plot.
-#' The function plots both 95\% credible intervals for the estimated survival
-#' probability (by default the grey area around the fitted curve) and 95\% binomial confidence
-#' intervals for the observed survival probability (as black segments if
-#' \code{adddata = TRUE}).
-#' Both types of intervals are taken at the same level. Typically
-#' a good fit is expected to display a large overlap between the two intervals.
-#' If spaghetti = TRUE, the credible intervals are represented by two dotted
-#' lines limiting the credible band, and a spaghetti plot is added to this band.
-#' This spaghetti plot consists of the representation of simulated curves using parameter values
-#' sampled in the posterior distribution (10\% of the MCMC chains are randomly
-#' taken for this sample).
-#'
-#' @param x an object of class \code{survFitTT}
-#' @param xlab a label for the \eqn{X}-axis, default is \code{Concentration}
-#' @param ylab a label for the \eqn{Y}-axis, default is \code{Survival probability}
-#' @param main main title for the plot
-#' @param fitcol color of the fitted curve
-#' @param fitlty line type of the fitted curve
-#' @param fitlwd width of the fitted curve
-#' @param spaghetti if \code{TRUE}, the credible interval is represented by 
-#' multiple curves
-#' @param cicol color of the 95 \% credible interval limits
-#' @param cilty line type for the 95 \% credible interval limits
-#' @param cilwd width of the 95 \% credible interval limits
-#' @param ribcol color of the ribbon between lower and upper credible limits.
-#' Transparent if \code{NULL}
-#' @param adddata if \code{TRUE}, adds the observed data with confidence intervals
-#' to the plot
-#' @param addlegend if \code{TRUE}, adds a default legend to the plot
-#' @param log.scale if \code{TRUE}, displays \eqn{X}-axis in log-scale
-#' @param style graphical backend, can be \code{'generic'} or \code{'ggplot'}
-#' @param \dots Further arguments to be passed to generic methods
-#' @note When \code{style = "ggplot"}, the function calls function
-#' \code{\link[ggplot2]{ggplot}} and returns an object of class \code{ggplot}.
-#'
-#' @return a plot of class \code{ggplot}
-#' 
-#' @examples
-#'
-#' # (1) Load the data
-#' data(cadmium1)
-#'
-#' # (2) Create an object of class "survData"
-#' dat <- survData(cadmium1)
-#'
-#' \donttest{
-#' # (3) Run the survFitTT function with the log-logistic
-#' #     binomial model
-#' out <- survFitTT(dat, lcx = c(5, 10, 15, 20, 30, 50, 80),
-#'                  quiet = TRUE)
-#'
-#' # (4) Plot the fitted curve
-#' plot(out, log.scale = TRUE, adddata = TRUE)
-#'
-#' # (5) Plot the fitted curve with ggplot style
-#' plot(out, xlab = expression("Concentration in" ~ mu~g.L^{-1}),
-#'      fitcol = "blue", adddata = TRUE, cicol = "blue",
-#'      style = "ggplot")
-#' }
-#'
-#' @keywords plot
-#'
-#' @import grDevices
-#' @import ggplot2
-#' @importFrom gridExtra grid.arrange arrangeGrob
-#' @importFrom grid grid.rect gpar
-#' @importFrom graphics plot axis legend lines par points polygon segments
-#' @importFrom stats aggregate
-#' @importFrom reshape2 melt
-#'
-#' @export
-plot.survFitTT <- function(x,
-                           xlab = "Concentration",
-                           ylab = "Survival probability",
-                           main = NULL,
-                           fitcol = "orange",
-                           fitlty = 1,
-                           fitlwd = 1,
-                           spaghetti = FALSE,
-                           cicol = "orange",
-                           cilty = 2,
-                           cilwd = 1,
-                           ribcol = "grey70",
-                           adddata = FALSE,
-                           addlegend = FALSE,
-                           log.scale = FALSE,
-                           style = "ggplot", ...) {
-  # plot the fitted curve estimated by survFitTT
-  # INPUTS
-  # - x:  survFitTt object
-  # - xlab : label x
-  # - ylab : label y
-  # - main : main title
-  # - fitcol : color fitted curve
-  # - fitlty : type line fitted curve
-  # - fitlwd : width line fitted curve
-  # - cicol : color ci ribbon
-  # - cilty : type line ci ribbon
-  # - cilwd : width line ci ribbon
-  # - addlegend : boolean
-  # - log.scale : x log option
-  # - style : generic or ggplot
-  # OUTPUT:
-  # - plot of fitted regression
-  
-  # Selection of datapoints that can be displayed given the type of scale
-  sel <- if(log.scale) x$dataTT$conc > 0 else TRUE
-  
-  dataTT <- x$dataTT[sel, ]
-  dataTT$resp <- dataTT$Nsurv / dataTT$Ninit
-  # data points are systematically pooled, since our model does not
-  # take individual variation into account
-  dataTT <- aggregate(resp ~ conc, dataTT, mean)
-  transf_data_conc <- optLogTransform(log.scale, dataTT$conc)
-  
-  # Concentration values used for display in linear scale
-  display.conc <- (function() {
-    x <- optLogTransform(log.scale, dataTT$conc)
-    s <- seq(min(x),max(x), length = 100)
-    if(log.scale) exp(s) else s
-  })()
-  
-  # Possibly log transformed concentration values for display
-  curv_conc <- optLogTransform(log.scale, display.conc)
-  
-  conf.int <- survLlbinomConfInt(x, log.scale)
-  cred.int <- survMeanCredInt(x, display.conc)
-  spaghetti.CI <- if (spaghetti) { survSpaghetti(x, display.conc) } else NULL
-  dataCIm <- if (spaghetti) {melt(cbind(curv_conc, spaghetti.CI),
-                                  id.vars = c("curv_conc", "conc"))} else NULL
-  
-  curv_resp <- data.frame(conc = curv_conc, resp = cred.int[["q50"]],
-                          Line = "loglogistic")
-  
-  if (style == "generic") {
-    survFitPlotGenericCredInt(x,
-                              dataTT$conc, transf_data_conc, dataTT$resp,
-                              curv_conc, curv_resp,
-                              conf.int, cred.int, spaghetti.CI, dataCIm,
-                              xlab, ylab, fitcol, fitlty, fitlwd,
-                              main, addlegend, adddata,
-                              cicol, cilty, cilwd, ribcol, log.scale)
-  }
-  else if (style == "ggplot") {
-    survFitPlotGG(x,
-                  dataTT$conc, transf_data_conc, dataTT$resp,
-                  curv_conc, curv_resp,
-                  conf.int, cred.int, spaghetti.CI, dataCIm,
-                  xlab, ylab, fitcol, fitlty, fitlwd,
-                  main, addlegend, adddata,
-                  cicol, cilty, cilwd / 2, ribcol)
-  }
-  else stop("Unknown style")
-}
-
-#' @importFrom stats aggregate binom.test
-survLlbinomConfInt <- function(x, log.scale) {
-  # create confidente interval on observed data for the log logistic
-  # binomial model by a binomial test
-  # INPUT:
-  # - x : object of class survFitTT
-  # - log.scale : boolean
-  # OUTPUT:
-  
-  # - ci : confidente interval
-  x <- cbind(aggregate(Nsurv ~ time + conc, x$dataTT, sum),
-             Ninit = aggregate(Ninit ~ time + conc, x$dataTT, sum)$Ninit)
-  
-  ci <- apply(x, 1, function(x) {
-    binom.test(x["Nsurv"], x["Ninit"])$conf.int
-  })
-  rownames(ci) <- c("qinf95", "qsup95")
-  colnames(ci) <- x$conc
-  
-  if (log.scale) ci <- ci[ ,colnames(ci) != 0]
-  
-  return(ci)
-}
-
-#' @importFrom stats quantile
-survMeanCredInt <- function(fit, x) {
-  # create the parameters for credible interval for the log logistic binomial
-  # model
-  # INPUT:
-  # - fit : object of class survFitTT
-  # - x : vector of concentrations values (x axis)
-  # OUTPUT:
-  # - ci : credible limit
-  
-  mctot <- do.call("rbind", fit$mcmc)
-  k <- nrow(mctot)
-  # parameters
-  if (fit$det.part == "loglogisticbinom_3") {
-    d2 <- mctot[, "d"]
-  }
-  log10b2 <- mctot[, "log10b"]
-  b2 <- 10^log10b2
-  log10e2 <- mctot[, "log10e"]
-  e2 <- 10^log10e2
-  
-  # quantiles
-  qinf95 = NULL
-  q50 = NULL
-  qsup95 = NULL
-  
-  for (i in 1:length(x)) {
-    # llbinom 2 parameters
-    if (fit$det.part == "loglogisticbinom_2") {
-      theomean <- 1 / (1 + (x[i] / e2)^(b2)) # mean curve
-    }
-    
-    # llbinom 3 parameters
-    else if (fit$det.part == "loglogisticbinom_3") {
-      theomean <- d2 / (1 + (x[i] / e2)^(b2)) # mean curve
-    }
-    # IC 95%
-    qinf95[i] <- quantile(theomean, probs = 0.025, na.rm = TRUE)
-    q50[i] <- quantile(theomean, probs = 0.5, na.rm = TRUE)
-    qsup95[i] <- quantile(theomean, probs = 0.975, na.rm = TRUE)
-  }
-  
-  # values for CI
-  ci <- data.frame(qinf95 = qinf95,
-                   q50 = q50,
-                   qsup95 = qsup95)
-  
-  return(ci)
-}
-
-survSpaghetti <- function(fit, x) {
-  mctot <- do.call("rbind", fit$mcmc)
-  sel <- sample(nrow(mctot))[1:ceiling(nrow(mctot) / 10)]
-  
-  # parameters
-  if (fit$det.part == "loglogisticbinom_3") {
-    d2 <- mctot[, "d"][sel]
-  }
-  log10b2 <- mctot[, "log10b"][sel]
-  b2 <- 10^log10b2
-  log10e2 <- mctot[, "log10e"][sel]
-  e2 <- 10^log10e2
-  
-  # all theorical
-  dtheo <- array(data = NA, dim = c(length(x), length(e2)))
-  for (i in 1:length(e2)) {
-    # llbinom 2 parameters
-    if (fit$det.part == "loglogisticbinom_2") {
-      dtheo[, i] <- 1 / (1 + (x / e2[i])^(b2[i])) # mean curve
-    }
-    # llbinom 3 parameters
-    else if (fit$det.part == "loglogisticbinom_3") {
-      dtheo[, i] <- d2[i] / (1 + (x / e2[i])^(b2[i])) # mean curve
-    }
-  }
-  dtheof <- as.data.frame(cbind(x, dtheo))
-  names(dtheof) <- c("conc", paste0("X", 1:length(sel)))
-  
-  return(dtheof)
-}
-
-survFitPlotGenericCredInt <- function(x,
-                                      data_conc, transf_data_conc, data_resp,
-                                      curv_conc, curv_resp,
-                                      conf.int, cred.int, spaghetti.CI, dataCIm,
-                                      xlab, ylab, fitcol, fitlty, fitlwd,
-                                      main, addlegend, adddata,
-                                      cicol, cilty, cilwd, ribcol, log.scale)
-{
-  # plot the fitted curve estimated by survFitTT
-  # with generic style with credible interval
-  plot(transf_data_conc, data_resp,
-       xlab = xlab,
-       ylab = ylab,
-       main = main,
-       xaxt = "n",
-       yaxt = "n",
-       ylim = c(0, 1.01),
-       type = "n")
-  
-  # axis
-  axis(side = 2, at = pretty(c(0, max(c(conf.int["qsup95",],
-                                        cred.int[["qsup95"]])))))
-  axis(side = 1,
-       at = transf_data_conc,
-       labels = data_conc)
-  
-  # Plotting the theoretical curve
-  # CI ribbon + lines
-  if (!is.null(spaghetti.CI)) {
-    color <- "gray"
-    color_transparent <- adjustcolor(color, alpha.f = 0.05)
-    by(dataCIm, dataCIm$variable, function(x) {
-      lines(x$curv_conc, x$value, col = color_transparent)
-    })
-  } else if(!is.null(ribcol)) {
-    polygon(c(curv_conc, rev(curv_conc)), c(cred.int[["qinf95"]],
-                                            rev(cred.int[["qsup95"]])),
-            col = ribcol, border = NA)
-  }
-  
-  lines(curv_conc, cred.int[["qsup95"]], type = "l", col = cicol, lty = cilty,
-        lwd = cilwd)
-  lines(curv_conc, cred.int[["qinf95"]], type = "l", col = cicol, lty = cilty,
-        lwd = cilwd)
-  
-  if (adddata) {
-    # segment CI
-    segments(transf_data_conc, data_resp,
-             transf_data_conc, conf.int["qsup95", ])
-    
-    segments(transf_data_conc, data_resp,
-             transf_data_conc, conf.int["qinf95", ])
-    
-    # points
-    points(transf_data_conc, data_resp, pch = 16)
-  }
-  
-  # fitted curve
-  lines(curv_conc, curv_resp[, "resp"], type = "l", col = fitcol, lty = fitlty,
-        lwd = fitlwd)
-  
-  # legend
-  if (addlegend) {
-    legend("bottomleft", pch = c(ifelse(adddata, 16, NA), NA, NA, NA),
-           lty = c(NA, ifelse(adddata, 1, NA), cilty, fitlty),
-           lwd = c(NA, ifelse(adddata,1, NA), cilwd, fitlwd),
-           col = c(ifelse(adddata, 1, NA), 1, cicol, fitcol),
-           legend = c(ifelse(adddata, "Observed values", NA),
-                      ifelse(adddata, "Confidence interval", NA),
-                      "Credible limits", x$det.part),
-           bty = "n")
-  }
-}
-
-#' @importFrom grid arrow unit
-survFitPlotGGCredInt <- function(x, data, curv_resp, conf.int, cred.int,
-                                 spaghetti.CI, dataCIm, cilty, cilwd,
-                                 valCols, fitlty, fitlwd, ribcol, xlab, ylab, main,
-                                 adddata) {
-  # IC
-  data.three <- data.frame(conc = data$transf_conc,
-                           qinf95 = conf.int["qinf95",],
-                           qsup95 = conf.int["qsup95",],
-                           Conf.Int = "Confidence interval")
-  data.four <- data.frame(conc = curv_resp$conc,
-                          qinf95 = cred.int[["qinf95"]],
-                          qsup95 = cred.int[["qsup95"]],
-                          Cred.Lim = "Credible limits")
-  
-  if (adddata) {
-    plt_3 <- ggplot(data) +
-      geom_segment(aes(x = conc, xend = conc, y = qinf95, yend = qsup95,
-                       linetype = Conf.Int), data.three,
-                   color = valCols$cols3) +
-      scale_linetype(name = "") +
-      theme_minimal()
-  }
-  
-  plt_302 <- if (!is.null(spaghetti.CI)) {
-    ggplot(data) + geom_line(data = dataCIm, aes(x = curv_conc, y = value,
-                                                 group = variable),
-                             col = "gray", alpha = 0.05)
-  } else {
-    ggplot(data) + geom_ribbon(data = data.four, aes(x = conc, ymin = qinf95,
-                                                     ymax = qsup95),
-                               fill = ribcol, col = NA, alpha = 0.4)
-  }
-  
-  plt_32 <- plt_302 +
-    geom_line(data = data.four, aes(conc, qinf95, color = Cred.Lim),
-              linetype = cilty, size = cilwd) +
-    geom_line(data = data.four, aes(conc, qsup95, color = Cred.Lim),
-              linetype = cilty, size = cilwd) +
-    scale_color_manual("", values = valCols$cols4) +
-    theme_minimal()
-  
-  # plot IC
-  # final plot
-  if (!is.null(spaghetti.CI)) {
-    plt_40 <- ggplot(data) +
-      geom_line(data = dataCIm, aes(x = curv_conc, y = value, group = variable),
-                col = "gray", alpha = 0.05)
-  } else {
-    plt_40 <- ggplot(data) + geom_ribbon(data = data.four, aes(x = conc,
-                                                               ymin = qinf95,
-                                                               ymax = qsup95),
-                                         fill = ribcol,
-                                         col = NA, alpha = 0.4)
-  }
-  
-  plt_401 <- plt_40 +
-    geom_line(data = data.four, aes(conc, qinf95),
-              linetype = cilty, size = cilwd, color = valCols$cols4) +
-    geom_line(data = data.four, aes(conc, qsup95),
-              linetype = cilty, size = cilwd, color = valCols$cols4) +
-    geom_line(data = curv_resp, aes(conc, resp),
-              linetype = fitlty, size = fitlwd, color = valCols$cols2) +
-    scale_color_discrete(guide = "none") +
-    ylim(0, 1) +
-    labs(x = xlab, y = ylab) +
-    ggtitle(main) + theme_minimal()
-  
-  if (adddata) {
-    plt_4 <- plt_401 + geom_point(data = data, aes(transf_conc, resp)) +
-      geom_segment(aes(x = conc, xend = conc, y = qinf95, yend = qsup95),
-                   data.three, col = valCols$cols3)
-  } else {
-    plt_4 <- plt_401
-  }
-  
-  return(list(plt_3 = if (adddata) plt_3 else NULL,
-              plt_32 = plt_32,
-              plt_4 = plt_4))
-}
-
-survFitPlotGG <- function(x,
-                          data_conc, transf_data_conc, data_resp,
-                          curv_conc, curv_resp,
-                          conf.int, cred.int, spaghetti.CI, dataCIm,
-                          xlab, ylab, fitcol, fitlty, fitlwd,
-                          main, addlegend, adddata,
-                          cicol, cilty, cilwd, ribcol) {
-  
-  
-  if (Sys.getenv("RSTUDIO") == "") {
-    dev.new() # create a new page plot
-    # when not use RStudio
-  }
-  
-  # dataframes points (data) and curve (curv)
-  data <- data.frame(conc = data_conc, transf_conc = transf_data_conc,
-                     resp = data_resp, Points = "Observed values")
-  
-  # colors
-  valCols <- fCols(data, fitcol, cicol)
-  
-  if (adddata) {
-    # points (to create the legend)
-    plt_1 <- ggplot(data) +
-      geom_point(data = data, aes(transf_conc, resp, fill = Points),
-                 col = valCols$cols1) + scale_fill_hue("") +
-      theme_minimal()
-  }
-  
-  # curve (to create the legend)
-  plt_2 <- ggplot(data) +
-    geom_line(data = curv_resp, aes(conc, resp, colour = Line),
-              linetype = fitlty, size = fitlwd) +
-    scale_colour_manual("", values = valCols$cols2) +
-    theme_minimal()
-  
-  plt_4 <-
-    survFitPlotGGCredInt(x, data, curv_resp, conf.int, cred.int, spaghetti.CI,
-                         dataCIm, cilty, cilwd, valCols, fitlty, fitlwd, ribcol,
-                         xlab, ylab, main, adddata)$plt_4
-  
-  if (addlegend) { # legend yes
-    # create legends
-    mylegend_1 <- if (adddata) { legendGgplotFit(plt_1) } else NULL # points legend
-    mylegend_2 <- legendGgplotFit(plt_2) # mean line legend
-    
-    plt_5 <- plt_4 + scale_x_continuous(breaks = data$transf_conc,
-                                        labels = data$conc)
-    
-    plt_3 <- survFitPlotGGCredInt(x, data, curv_resp, conf.int, cred.int, 
-                                  spaghetti.CI, dataCIm, cilty, cilwd,
-                                  valCols, fitlty, fitlwd, ribcol, xlab, ylab, main,
-                                  adddata)$plt_3
-    plt_32 <- survFitPlotGGCredInt(x, data, curv_resp, conf.int, cred.int, 
-                                   spaghetti.CI, dataCIm, cilty, cilwd,
-                                   valCols, fitlty, fitlwd, ribcol, xlab, ylab, main,
-                                   adddata)$plt_32
-    
-    mylegend_3 <- if (adddata) { legendGgplotFit(plt_3) } else NULL
-    mylegend_32 <- legendGgplotFit(plt_32)
-    
-    if (adddata) {
-      grid.arrange(plt_5, arrangeGrob(mylegend_1, mylegend_3, mylegend_32,
-                                      mylegend_2, nrow = 6), ncol = 2,
-                   widths = c(6, 2))
-    } else {
-      grid.arrange(plt_5, arrangeGrob(mylegend_32,
-                                      mylegend_2, nrow = 6), ncol = 2,
-                   widths = c(6, 2))
-    }
-  }
-  else { # no legend
-    plt_5 <- plt_4 + scale_x_continuous(breaks = data$transf_conc,
-                                        labels = data$conc)
-    return(plt_5)
-  }
-}
-
+#' Plotting method for \code{survFitTT} objects
+#'
+#' This is the generic \code{plot} S3 method for the \code{survFitTT} class. It
+#' plots concentration-response fit under target time survival analysis.
+#'
+#' The fitted curve represents the \strong{estimated survival probability} at
+#' the target time as a function of the concentration of chemical compound;
+#' When \code{adddata = TRUE} the black dots depict the \strong{observed survival
+#' probability} at each tested concentration. Note that since our model does not take
+#' inter-replicate variability into consideration, replicates are systematically
+#' pooled in this plot.
+#' The function plots both 95\% credible intervals for the estimated survival
+#' probability (by default the grey area around the fitted curve) and 95\% binomial confidence
+#' intervals for the observed survival probability (as black segments if
+#' \code{adddata = TRUE}).
+#' Both types of intervals are taken at the same level. Typically
+#' a good fit is expected to display a large overlap between the two intervals.
+#' If \code{spaghetti = TRUE}, the credible intervals are represented by two dotted
+#' lines limiting the credible band, and a spaghetti plot is added to this band.
+#' This spaghetti plot consists of simulated curves drawn with parameter values
+#' sampled from the posterior distribution (10\% of the MCMC samples are
+#' randomly selected for this purpose).
+#'
+#' @param x an object of class \code{survFitTT}
+#' @param xlab a label for the \eqn{X}-axis, default is \code{Concentration}
+#' @param ylab a label for the \eqn{Y}-axis, default is \code{Survival probability}
+#' @param main main title for the plot
+#' @param fitcol color of the fitted curve
+#' @param fitlty line type of the fitted curve
+#' @param fitlwd width of the fitted curve
+#' @param spaghetti if \code{TRUE}, the credible interval is represented by 
+#' multiple curves
+#' @param cicol color of the 95\% credible interval limits
+#' @param cilty line type for the 95\% credible interval limits
+#' @param cilwd width of the 95\% credible interval limits
+#' @param ribcol color of the ribbon between lower and upper credible limits.
+#' Transparent if \code{NULL}
+#' @param adddata if \code{TRUE}, adds the observed data with confidence intervals
+#' to the plot
+#' @param addlegend if \code{TRUE}, adds a default legend to the plot
+#' @param log.scale if \code{TRUE}, displays \eqn{X}-axis in log-scale
+#' @param style graphical backend, can be \code{'generic'} or \code{'ggplot'}
+#' @param \dots Further arguments to be passed to generic methods
+#' @note When \code{style = "ggplot"}, the function calls function
+#' \code{\link[ggplot2]{ggplot}} and returns an object of class \code{ggplot}.
+#'
+#' @return a plot of class \code{ggplot}
+#'
+#' @keywords plot
+#'
+#' @import grDevices
+#' @import ggplot2
+#' @importFrom gridExtra grid.arrange arrangeGrob
+#' @importFrom grid grid.rect gpar
+#' @importFrom graphics plot axis legend lines par points polygon segments
+#' @importFrom stats aggregate
+#' @importFrom reshape2 melt
+#'
+#' @export
+plot.survFitTT <- function(x,
+                           xlab = "Concentration",
+                           ylab = "Survival probability",
+                           main = NULL,
+                           fitcol = "orange",
+                           fitlty = 1,
+                           fitlwd = 1,
+                           spaghetti = FALSE,
+                           cicol = "orange",
+                           cilty = 2,
+                           cilwd = 1,
+                           ribcol = "grey70",
+                           adddata = FALSE,
+                           addlegend = FALSE,
+                           log.scale = FALSE,
+                           style = "ggplot", ...) {
+  # plot the fitted curve estimated by survFitTT
+  # INPUTS
+  # - x : survFitTT object
+  # - xlab : label x
+  # - ylab : label y
+  # - main : main title
+  # - fitcol : color fitted curve
+  # - fitlty : type line fitted curve
+  # - fitlwd : width line fitted curve
+  # - cicol : color ci ribbon
+  # - cilty : type line ci ribbon
+  # - cilwd : width line ci ribbon
+  # - addlegend : boolean
+  # - log.scale : x log option
+  # - style : generic or ggplot
+  # OUTPUT:
+  # - plot of fitted regression
+  
+  # Selection of datapoints that can be displayed given the type of scale
+  sel <- if(log.scale) x$dataTT$conc > 0 else TRUE
+  
+  dataTT <- x$dataTT[sel, ]
+  dataTT$resp <- dataTT$Nsurv / dataTT$Ninit
+  # data points are systematically pooled, since our model does not
+  # take individual variation into account
+  dataTT <- aggregate(resp ~ conc, dataTT, mean)
+  transf_data_conc <- optLogTransform(log.scale, dataTT$conc)
+  
+  # Concentration values used for display in linear scale
+  display.conc <- (function() {
+    x <- optLogTransform(log.scale, dataTT$conc)
+    s <- seq(min(x),max(x), length = 100)
+    if(log.scale) exp(s) else s
+  })()
+  
+  # Possibly log transformed concentration values for display
+  curv_conc <- optLogTransform(log.scale, display.conc)
+  
+  conf.int <- survLlbinomConfInt(x, log.scale)
+  cred.int <- survMeanCredInt(x, display.conc)
+  spaghetti.CI <- if (spaghetti) { survSpaghetti(x, display.conc) } else NULL
+  dataCIm <- if (spaghetti) {melt(cbind(curv_conc, spaghetti.CI),
+                                  id.vars = c("curv_conc", "conc"))} else NULL
+  
+  curv_resp <- data.frame(conc = curv_conc, resp = cred.int[["q50"]],
+                          Line = "loglogistic")
+  
+  if (style == "generic") {
+    survFitPlotGenericCredInt(x,
+                              dataTT$conc, transf_data_conc, dataTT$resp,
+                              curv_conc, curv_resp,
+                              conf.int, cred.int, spaghetti.CI, dataCIm,
+                              xlab, ylab, fitcol, fitlty, fitlwd,
+                              main, addlegend, adddata,
+                              cicol, cilty, cilwd, ribcol, log.scale)
+  }
+  else if (style == "ggplot") {
+    survFitPlotGG(x,
+                  dataTT$conc, transf_data_conc, dataTT$resp,
+                  curv_conc, curv_resp,
+                  conf.int, cred.int, spaghetti.CI, dataCIm,
+                  xlab, ylab, fitcol, fitlty, fitlwd,
+                  main, addlegend, adddata,
+                  cicol, cilty, cilwd / 2, ribcol)
+  }
+  else stop("Unknown style")
+}
+
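+# A minimal usage sketch (not run), based on the package's 'cadmium1' dataset:
+#   dat <- survData(cadmium1)
+#   out <- survFitTT(dat, lcx = c(5, 10, 15, 20, 30, 50, 80), quiet = TRUE)
+#   plot(out, log.scale = TRUE, adddata = TRUE)   # default ggplot style
+#   plot(out, adddata = TRUE, addlegend = TRUE, style = "generic")
+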
+#' @importFrom stats aggregate binom.test
+survLlbinomConfInt <- function(x, log.scale) {
+  # create confidence intervals on the observed data for the log-logistic
+  # binomial model, using a binomial test
+  # INPUT:
+  # - x : object of class survFitTT
+  # - log.scale : boolean
+  # OUTPUT:
+  # - ci : confidence intervals
+  x <- cbind(aggregate(Nsurv ~ time + conc, x$dataTT, sum),
+             Ninit = aggregate(Ninit ~ time + conc, x$dataTT, sum)$Ninit)
+  
+  ci <- apply(x, 1, function(x) {
+    binom.test(x["Nsurv"], x["Ninit"])$conf.int
+  })
+  rownames(ci) <- c("qinf95", "qsup95")
+  colnames(ci) <- x$conc
+  
+  if (log.scale) ci <- ci[, colnames(ci) != 0]
+  
+  return(ci)
+}
+
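+# For instance (hypothetical counts): a pooled observation with Nsurv = 18
+# survivors out of Ninit = 20 gives binom.test(18, 20)$conf.int, the exact 95%
+# Clopper-Pearson interval, about (0.683, 0.988), drawn as black segments when
+# adddata = TRUE.
+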
+#' @importFrom stats quantile
+survMeanCredInt <- function(fit, x) {
+  # create the parameters for credible interval for the log logistic binomial
+  # model
+  # INPUT:
+  # - fit : object of class survFitTT
+  # - x : vector of concentrations values (x axis)
+  # OUTPUT:
+  # - ci : credible limit
+  
+  mctot <- do.call("rbind", fit$mcmc)
+  k <- nrow(mctot)
+  # parameters
+  if (fit$det.part == "loglogisticbinom_3") {
+    d2 <- mctot[, "d"]
+  }
+  log10b2 <- mctot[, "log10b"]
+  b2 <- 10^log10b2
+  log10e2 <- mctot[, "log10e"]
+  e2 <- 10^log10e2
+  
+  # quantile vectors, filled concentration by concentration below
+  qinf95 <- NULL
+  q50 <- NULL
+  qsup95 <- NULL
+  
+  for (i in 1:length(x)) {
+    # llbinom 2 parameters
+    if (fit$det.part == "loglogisticbinom_2") {
+      theomean <- 1 / (1 + (x[i] / e2)^(b2)) # mean curve
+    }
+    
+    # llbinom 3 parameters
+    else if (fit$det.part == "loglogisticbinom_3") {
+      theomean <- d2 / (1 + (x[i] / e2)^(b2)) # mean curve
+    }
+    # 95% credible interval
+    qinf95[i] <- quantile(theomean, probs = 0.025, na.rm = TRUE)
+    q50[i] <- quantile(theomean, probs = 0.5, na.rm = TRUE)
+    qsup95[i] <- quantile(theomean, probs = 0.975, na.rm = TRUE)
+  }
+  
+  # values for CI
+  ci <- data.frame(qinf95 = qinf95,
+                   q50 = q50,
+                   qsup95 = qsup95)
+  
+  return(ci)
+}
+
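+# In survMeanCredInt(), each MCMC draw yields one survival probability per
+# concentration, S(x) = 1 / (1 + (x / e)^b) (scaled by the asymptote d for
+# loglogisticbinom_3); the 2.5%, 50% and 97.5% quantiles of these draws define
+# the credible band and the fitted curve.
+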
+survSpaghetti <- function(fit, x) {
+  mctot <- do.call("rbind", fit$mcmc)
+  sel <- sample(nrow(mctot))[1:ceiling(nrow(mctot) / 10)]
+  
+  # parameters
+  if (fit$det.part == "loglogisticbinom_3") {
+    d2 <- mctot[, "d"][sel]
+  }
+  log10b2 <- mctot[, "log10b"][sel]
+  b2 <- 10^log10b2
+  log10e2 <- mctot[, "log10e"][sel]
+  e2 <- 10^log10e2
+  
+  # all theoretical curves, one column per retained MCMC draw
+  dtheo <- array(data = NA, dim = c(length(x), length(e2)))
+  for (i in 1:length(e2)) {
+    # llbinom 2 parameters
+    if (fit$det.part == "loglogisticbinom_2") {
+      dtheo[, i] <- 1 / (1 + (x / e2[i])^(b2[i])) # mean curve
+    }
+    # llbinom 3 parameters
+    else if (fit$det.part == "loglogisticbinom_3") {
+      dtheo[, i] <- d2[i] / (1 + (x / e2[i])^(b2[i])) # mean curve
+    }
+  }
+  dtheof <- as.data.frame(cbind(x, dtheo))
+  names(dtheof) <- c("conc", paste0("X", 1:length(sel)))
+  
+  return(dtheof)
+}
+
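+# survSpaghetti() evaluates the same curves for a random 10% subset of the
+# MCMC draws, one column per draw ("X1", "X2", ...); plot.survFitTT() melts
+# these columns into dataCIm and overlays them as translucent grey lines.
+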
+survFitPlotGenericCredInt <- function(x,
+                                      data_conc, transf_data_conc, data_resp,
+                                      curv_conc, curv_resp,
+                                      conf.int, cred.int, spaghetti.CI, dataCIm,
+                                      xlab, ylab, fitcol, fitlty, fitlwd,
+                                      main, addlegend, adddata,
+                                      cicol, cilty, cilwd, ribcol, log.scale)
+{
+  # plot the fitted curve estimated by survFitTT,
+  # generic style, with its credible interval
+  plot(transf_data_conc, data_resp,
+       xlab = xlab,
+       ylab = ylab,
+       main = main,
+       xaxt = "n",
+       yaxt = "n",
+       ylim = c(0, 1.01),
+       type = "n")
+  
+  # axis
+  axis(side = 2, at = pretty(c(0, max(c(conf.int["qsup95",],
+                                        cred.int[["qsup95"]])))))
+  axis(side = 1,
+       at = transf_data_conc,
+       labels = data_conc)
+  
+  # Plotting the theoretical curve
+  # CI ribbon + lines
+  if (!is.null(spaghetti.CI)) {
+    color <- "gray"
+    color_transparent <- adjustcolor(color, alpha.f = 0.05)
+    by(dataCIm, dataCIm$variable, function(x) {
+      lines(x$curv_conc, x$value, col = color_transparent)
+    })
+  } else if(!is.null(ribcol)) {
+    polygon(c(curv_conc, rev(curv_conc)), c(cred.int[["qinf95"]],
+                                            rev(cred.int[["qsup95"]])),
+            col = ribcol, border = NA)
+  }
+  
+  lines(curv_conc, cred.int[["qsup95"]], type = "l", col = cicol, lty = cilty,
+        lwd = cilwd)
+  lines(curv_conc, cred.int[["qinf95"]], type = "l", col = cicol, lty = cilty,
+        lwd = cilwd)
+  
+  if (adddata) {
+    # segment CI
+    segments(transf_data_conc, data_resp,
+             transf_data_conc, conf.int["qsup95", ])
+    
+    segments(transf_data_conc, data_resp,
+             transf_data_conc, conf.int["qinf95", ])
+    
+    # points
+    points(transf_data_conc, data_resp, pch = 16)
+  }
+  
+  # fitted curve
+  lines(curv_conc, curv_resp[, "resp"], type = "l", col = fitcol, lty = fitlty,
+        lwd = fitlwd)
+  
+  # legend
+  if (addlegend) {
+    legend("bottomleft", pch = c(ifelse(adddata, 16, NA), NA, NA, NA),
+           lty = c(NA, ifelse(adddata, 1, NA), cilty, fitlty),
+           lwd = c(NA, ifelse(adddata, 1, NA), cilwd, fitlwd),
+           col = c(ifelse(adddata, 1, NA), 1, cicol, fitcol),
+           legend = c(ifelse(adddata, "Observed values", NA),
+                      ifelse(adddata, "Confidence interval", NA),
+                      "Credible limits", x$det.part),
+           bty = "n")
+  }
+}
+
+#' @importFrom grid arrow unit
+survFitPlotGGCredInt <- function(x, data, curv_resp, conf.int, cred.int,
+                                 spaghetti.CI, dataCIm, cilty, cilwd,
+                                 valCols, fitlty, fitlwd, ribcol, xlab, ylab, main,
+                                 adddata) {
+  # confidence and credible interval data frames
+  data.three <- data.frame(conc = data$transf_conc,
+                           qinf95 = conf.int["qinf95",],
+                           qsup95 = conf.int["qsup95",],
+                           Conf.Int = "Confidence interval")
+  data.four <- data.frame(conc = curv_resp$conc,
+                          qinf95 = cred.int[["qinf95"]],
+                          qsup95 = cred.int[["qsup95"]],
+                          Cred.Lim = "Credible limits")
+  
+  if (adddata) {
+    plt_3 <- ggplot(data) +
+      geom_segment(aes(x = conc, xend = conc, y = qinf95, yend = qsup95,
+                       linetype = Conf.Int), data.three,
+                   color = valCols$cols3) +
+      scale_linetype(name = "") +
+      theme_minimal()
+  }
+  
+  plt_302 <- if (!is.null(spaghetti.CI)) {
+    ggplot(data) + geom_line(data = dataCIm, aes(x = curv_conc, y = value,
+                                                 group = variable),
+                             col = "gray", alpha = 0.05)
+  } else {
+    ggplot(data) + geom_ribbon(data = data.four, aes(x = conc, ymin = qinf95,
+                                                     ymax = qsup95),
+                               fill = ribcol, col = NA, alpha = 0.4)
+  }
+  
+  plt_32 <- plt_302 +
+    geom_line(data = data.four, aes(conc, qinf95, color = Cred.Lim),
+              linetype = cilty, size = cilwd) +
+    geom_line(data = data.four, aes(conc, qsup95, color = Cred.Lim),
+              linetype = cilty, size = cilwd) +
+    scale_color_manual("", values = valCols$cols4) +
+    theme_minimal()
+  
+  # intervals for the final plot
+  if (!is.null(spaghetti.CI)) {
+    plt_40 <- ggplot(data) +
+      geom_line(data = dataCIm, aes(x = curv_conc, y = value, group = variable),
+                col = "gray", alpha = 0.05)
+  } else {
+    plt_40 <- ggplot(data) + geom_ribbon(data = data.four, aes(x = conc,
+                                                               ymin = qinf95,
+                                                               ymax = qsup95),
+                                         fill = ribcol,
+                                         col = NA, alpha = 0.4)
+  }
+  
+  plt_401 <- plt_40 +
+    geom_line(data = data.four, aes(conc, qinf95),
+              linetype = cilty, size = cilwd, color = valCols$cols4) +
+    geom_line(data = data.four, aes(conc, qsup95),
+              linetype = cilty, size = cilwd, color = valCols$cols4) +
+    geom_line(data = curv_resp, aes(conc, resp),
+              linetype = fitlty, size = fitlwd, color = valCols$cols2) +
+    scale_color_discrete(guide = "none") +
+    ylim(0, 1) +
+    labs(x = xlab, y = ylab) +
+    ggtitle(main) + theme_minimal()
+  
+  if (adddata) {
+    plt_4 <- plt_401 + geom_point(data = data, aes(transf_conc, resp)) +
+      geom_segment(aes(x = conc, xend = conc, y = qinf95, yend = qsup95),
+                   data.three, col = valCols$cols3)
+  } else {
+    plt_4 <- plt_401
+  }
+  
+  return(list(plt_3 = if (adddata) plt_3 else NULL,
+              plt_32 = plt_32,
+              plt_4 = plt_4))
+}
+
+survFitPlotGG <- function(x,
+                          data_conc, transf_data_conc, data_resp,
+                          curv_conc, curv_resp,
+                          conf.int, cred.int, spaghetti.CI, dataCIm,
+                          xlab, ylab, fitcol, fitlty, fitlwd,
+                          main, addlegend, adddata,
+                          cicol, cilty, cilwd, ribcol) {
+  
+  
+  if (Sys.getenv("RSTUDIO") == "") {
+    dev.new() # create a new page plot
+    # when not use RStudio
+  }
+  
+  # data frames for the observed points (data) and the fitted curve (curv_resp)
+  data <- data.frame(conc = data_conc, transf_conc = transf_data_conc,
+                     resp = data_resp, Points = "Observed values")
+  
+  # colors
+  valCols <- fCols(data, fitcol, cicol)
+  
+  if (adddata) {
+    # points (to create the legend)
+    plt_1 <- ggplot(data) +
+      geom_point(data = data, aes(transf_conc, resp, fill = Points),
+                 col = valCols$cols1) + scale_fill_hue("") +
+      theme_minimal()
+  }
+  
+  # curve (to create the legend)
+  plt_2 <- ggplot(data) +
+    geom_line(data = curv_resp, aes(conc, resp, colour = Line),
+              linetype = fitlty, size = fitlwd) +
+    scale_colour_manual("", values = valCols$cols2) +
+    theme_minimal()
+  
+  plt_4 <-
+    survFitPlotGGCredInt(x, data, curv_resp, conf.int, cred.int, spaghetti.CI,
+                         dataCIm, cilty, cilwd, valCols, fitlty, fitlwd, ribcol,
+                         xlab, ylab, main, adddata)$plt_4
+  
+  if (addlegend) { # legend requested
+    # create legends
+    mylegend_1 <- if (adddata) { legendGgplotFit(plt_1) } else NULL # points legend
+    mylegend_2 <- legendGgplotFit(plt_2) # mean line legend
+    
+    plt_5 <- plt_4 + scale_x_continuous(breaks = data$transf_conc,
+                                        labels = data$conc)
+    
+    plt_3 <- survFitPlotGGCredInt(x, data, curv_resp, conf.int, cred.int, 
+                                  spaghetti.CI, dataCIm, cilty, cilwd,
+                                  valCols, fitlty, fitlwd, ribcol, xlab, ylab, main,
+                                  adddata)$plt_3
+    plt_32 <- survFitPlotGGCredInt(x, data, curv_resp, conf.int, cred.int, 
+                                   spaghetti.CI, dataCIm, cilty, cilwd,
+                                   valCols, fitlty, fitlwd, ribcol, xlab, ylab, main,
+                                   adddata)$plt_32
+    
+    mylegend_3 <- if (adddata) { legendGgplotFit(plt_3) } else NULL
+    mylegend_32 <- legendGgplotFit(plt_32)
+    
+    if (adddata) {
+      grid.arrange(plt_5, arrangeGrob(mylegend_1, mylegend_3, mylegend_32,
+                                      mylegend_2, nrow = 6), ncol = 2,
+                   widths = c(6, 2))
+    } else {
+      grid.arrange(plt_5, arrangeGrob(mylegend_32,
+                                      mylegend_2, nrow = 6), ncol = 2,
+                   widths = c(6, 2))
+    }
+  }
+  else { # no legend
+    plt_5 <- plt_4 + scale_x_continuous(breaks = data$transf_conc,
+                                        labels = data$conc)
+    return(plt_5)
+  }
+}
+
diff --git a/R/plot.survFitVarExp.R b/R/plot.survFitVarExp.R
index eb4fbb8728a5fa23a0ca7f1728e67d78e25e7755..ed80cfbea8d2717a38b14361772a0fda2942e3f9 100644
--- a/R/plot.survFitVarExp.R
+++ b/R/plot.survFitVarExp.R
@@ -1,164 +1,142 @@
-#' Plotting method for \code{survFit} objects
-#'
-#' This is the generic \code{plot} S3 method for the
-#' \code{survFit}.  It plots the fit obtained for each
-#' concentration profile in the original dataset.
-#'
-#' The fitted curves represent the \strong{estimated survival probability} as a function
-#' of time for each concentration profile.
-#' The black dots depict the \strong{observed survival
-#' probability} at each time point. Note that since our model does not take
-#' inter-replicate variability into consideration, replicates are systematically
-#' pooled in this plot.
-#' The function plots both 95\% binomial credible intervals for the estimated survival
-#' probability (by default the grey area around the fitted curve) and 95\% binomial confidence
-#' intervals for the observed survival probability (as black segments if
-#' \code{adddata = TRUE}).
-#' Both types of intervals are taken at the same level. Typically
-#' a good fit is expected to display a large overlap between the two types of  intervals.
-#' If \code{spaghetti = TRUE}, the credible intervals are represented by two
-#' dotted lines limiting the credible band, and a spaghetti plot is added to this band.
-#' This spaghetti plot consists of the representation of simulated curves using parameter values
-#' sampled in the posterior distribution (10\% of the MCMC chains are randomly
-#' taken for this sample).
-#'
-#' @param x An object of class \code{survFit}.
-#' @param xlab A label for the \eqn{X}-axis, by default \code{Time}.
-#' @param ylab A label for the \eqn{Y}-axis, by default \code{Survival probability}.
-#' @param main A main title for the plot.
-#' @param spaghetti if \code{TRUE}, draws a set of survival curves using
-#' parameters drawn from the posterior distribution
-#' @param one.plot if \code{TRUE}, draws all the estimated curves in
-#' one plot instead of one plot per concentration.
-#' @param adddata if \code{TRUE}, adds the observed data to the plot.
-#' @param mcmc_size A numerical value refering by default to the size of the mcmc in object \code{survFit}.
-#'  This option is specific to \code{survFitVarExp} objects for which computing time may be long.
-#'  \code{mcmc_size} can be used to reduce the number of mcmc samples in order to speed up
-#'  the computation.
-#' @param scales Shape the scale of axis. Default is \code{"fixed"}, but can be \code{"free"}, or free
-#' in only one dimension \code{"free_x"}, \code{"free_y"}. (See \code{ggplot2} documentation
-#'  for more details.)
-#' @param addConfInt If \code{TRUE}, add a \eqn{95\%} confidence interval on observed data from a binomial test
-#' @param \dots Further arguments to be passed to generic methods.
-#'
-#' @keywords plot
-#' 
-#' @return a plot of class \code{ggplot}
-#' 
-#' @examples
-#'
-#' # (1) Load the survival data
-#' data("propiconazole_pulse_exposure")
-#'
-#' # (2) Create an object of class "survData"
-#' dataset <- survData(propiconazole_pulse_exposure)
-#'
-#' \donttest{
-#' # (3) Run the survFit function
-#' out <- survFit(dataset , model_type = "SD")
-#'
-#' # (4) Summary look the estimated values (parameters)
-#' summary(out)
-#'
-#' # (5) Plot the fitted curve
-#' plot(out, adddata = FALSE)
-#'
-#' # (6) Plot the fitted curve with ggplot style and CI as spaghetti
-#' plot(out, spaghetti = TRUE)
-#' }
-#' 
-#' @export
-#' 
-#' @importFrom stats predict
-#' @importFrom tidyr gather
-#'
-plot.survFitVarExp <- function(x,
-                               xlab = "Time",
-                               ylab = "Survival probability",
-                               main = NULL,
-                               spaghetti = FALSE,
-                               one.plot = FALSE,
-                               adddata = TRUE,
-                               mcmc_size = NULL,
-                               scales = "fixed",
-                               addConfInt = TRUE,
-                               ...) {
-  
-  
-  df_predictTotal <- predict(x, spaghetti = spaghetti, mcmc_size = mcmc_size)
-  
-  df_prediction <-  df_predictTotal$df_quantile
-  df_spaghetti <-  df_predictTotal$df_spaghetti
-  
-  df_observation <- filter(x$original.data, !is.na(Nsurv)) %>%
-    group_by(replicate) %>%
-    mutate(Ninit = max(Nsurv))
-  
-  # Plot
-  plt <- ggplot() +
-      theme_minimal() +
-      scale_x_continuous(name = xlab) +
-      scale_y_continuous(name = ylab,
-                         limits = c(0,1)) +
-      theme(legend.position = "top")
-  
-  # Observation
-  if(adddata == TRUE){
-    plt <- plt +
-      geom_point(data = df_observation,
-               aes(x = time, y = Nsurv/Ninit, group = replicate))
-  }
-  if(addConfInt == TRUE){
-    # create confidente interval on observed data
-    ci <- apply(df_observation, 1, function(x) {
-      binom.test(as.numeric(x["Nsurv"]), as.numeric(x["Ninit"]))$conf.int
-    })
-    conf.int <- as.data.frame(t(ci))
-    df_observation$conf_int_qinf95 <- conf.int[, 1]
-    df_observation$conf_int_qsup95 <- conf.int[, 2]
-    
-    plt <- plt +
-      geom_segment(data = df_observation,
-                   aes(x = time, xend = time,
-                       y = conf_int_qinf95, yend = conf_int_qsup95, group = replicate))
-    
-  }
-  
-  # spaghetti
-  if(spaghetti == TRUE){
-
-    df_spaghetti_gather <- df_spaghetti %>%
-      tidyr::gather(survRate_key, survRate_value, -c(time,conc,replicate))
-
-    plt <- plt +
-      geom_line(data = df_spaghetti_gather,
-                aes(x = time, y = survRate_value, group = interaction(survRate_key, replicate)),
-                alpha = 0.02) +
-      geom_line(data = df_prediction,
-                aes(x = time, y= qinf95, group = replicate),
-                color = "orange", linetype = 2) +
-      geom_line(data = df_prediction,
-                aes(x = time, y = qsup95, group = replicate),
-                color = "orange", linetype = 2)
-  }
-  if(spaghetti != TRUE){
-    plt <- plt + 
-      geom_ribbon(data = df_prediction,
-                  aes(x = time, ymin = qinf95,ymax = qsup95, group = replicate),
-                  fill = "grey", alpha = 0.4)
-  }
-  
-  # Prediction
-  plt <- plt +
-    geom_line(data = df_prediction,
-              aes(x = time, y = q50, group = replicate),
-              col="orange", size = 1)
-    
-  # facetting
-  if(one.plot == FALSE){
-    plt <- plt + facet_wrap(~ replicate, scales = scales)
-  }  
-      
-   return(plt)
-  }
-
+#' Plotting method for \code{survFit} objects
+#'
+#' This is the generic \code{plot} S3 method for the
+#' \code{survFit} class. It plots the fit obtained for each
+#' concentration profile in the original dataset.
+#'
+#' The fitted curves represent the \strong{estimated survival probability} as a function
+#' of time for each concentration profile.
+#' The black dots depict the \strong{observed survival
+#' probability} at each time point. Note that since our model does not take
+#' inter-replicate variability into consideration, replicates are systematically
+#' pooled in this plot.
+#' The function plots both 95\% binomial credible intervals for the estimated survival
+#' probability (by default the grey area around the fitted curve) and 95\% binomial confidence
+#' intervals for the observed survival probability (as black segments if
+#' \code{adddata = TRUE}).
+#' Both types of intervals are taken at the same level. Typically
+#' a good fit is expected to display a large overlap between the two types of intervals.
+#' If \code{spaghetti = TRUE}, the credible intervals are represented by two
+#' dotted lines limiting the credible band, and a spaghetti plot is added to this band.
+#' This spaghetti plot consists of simulated curves drawn with parameter values
+#' sampled from the posterior distribution (10\% of the MCMC chains are randomly
+#' selected for this purpose).
+#'
+#' @param x An object of class \code{survFit}.
+#' @param xlab A label for the \eqn{X}-axis, by default \code{Time}.
+#' @param ylab A label for the \eqn{Y}-axis, by default \code{Survival probability}.
+#' @param main A main title for the plot.
+#' @param spaghetti if \code{TRUE}, draws a set of survival curves using
+#' parameters drawn from the posterior distribution
+#' @param one.plot if \code{TRUE}, draws all the estimated curves in
+#' one plot instead of one plot per concentration.
+#' @param adddata if \code{TRUE}, adds the observed data to the plot.
+#' @param mcmc_size A numerical value referring by default to the size of the MCMC chains in the \code{survFit} object.
+#'  This option is specific to \code{survFitVarExp} objects, for which computing time may be long.
+#'  \code{mcmc_size} can be used to reduce the number of MCMC samples in order to speed up
+#'  the computation.
+#' @param scales Shape of the axis scales. The default is \code{"fixed"}, but it can be \code{"free"}, or free
+#' in only one dimension: \code{"free_x"} or \code{"free_y"}. (See the \code{ggplot2} documentation
+#'  for more details.)
+#' @param addConfInt If \code{TRUE}, adds a \eqn{95\%} confidence interval on the observed data, computed from a binomial test.
+#' @param \dots Further arguments to be passed to generic methods.
+#'
+#' @keywords plot
+#' 
+#' @return a plot of class \code{ggplot}
+#' 
+#' @export
+#' 
+#' @importFrom stats predict
+#' @importFrom tidyr gather
+#'
+plot.survFitVarExp <- function(x,
+                               xlab = "Time",
+                               ylab = "Survival probability",
+                               main = NULL,
+                               spaghetti = FALSE,
+                               one.plot = FALSE,
+                               adddata = TRUE,
+                               mcmc_size = NULL,
+                               scales = "fixed",
+                               addConfInt = TRUE,
+                               ...) {
+  
+  
+  df_predictTotal <- predict(x, spaghetti = spaghetti, mcmc_size = mcmc_size)
+  
+  df_prediction <-  df_predictTotal$df_quantile
+  df_spaghetti <-  df_predictTotal$df_spaghetti
+  
+  df_observation <- filter(x$original.data, !is.na(Nsurv)) %>%
+    group_by(replicate) %>%
+    mutate(Ninit = max(Nsurv))
+  
+  # Plot
+  plt <- ggplot() +
+      theme_minimal() +
+      scale_x_continuous(name = xlab) +
+      scale_y_continuous(name = ylab,
+                         limits = c(0,1)) +
+      theme(legend.position = "top")
+  
+  # Observation
+  if(adddata == TRUE){
+    plt <- plt +
+      geom_point(data = df_observation,
+                 aes(x = time, y = Nsurv/Ninit, group = replicate))
+  }
+  if(addConfInt == TRUE){
+    # compute a confidence interval on the observed data
+    ci <- apply(df_observation, 1, function(x) {
+      binom.test(as.numeric(x["Nsurv"]), as.numeric(x["Ninit"]))$conf.int
+    })
+    conf.int <- as.data.frame(t(ci))
+    df_observation$conf_int_qinf95 <- conf.int[, 1]
+    df_observation$conf_int_qsup95 <- conf.int[, 2]
+    
+    plt <- plt +
+      geom_segment(data = df_observation,
+                   aes(x = time, xend = time,
+                       y = conf_int_qinf95, yend = conf_int_qsup95, group = replicate))
+    
+  }
+  
+  # spaghetti
+  if(spaghetti == TRUE){
+
+    df_spaghetti_gather <- df_spaghetti %>%
+      tidyr::gather(survRate_key, survRate_value, -c(time,conc,replicate))
+
+    plt <- plt +
+      geom_line(data = df_spaghetti_gather,
+                aes(x = time, y = survRate_value, group = interaction(survRate_key, replicate)),
+                alpha = 0.02) +
+      geom_line(data = df_prediction,
+                aes(x = time, y = qinf95, group = replicate),
+                color = "orange", linetype = 2) +
+      geom_line(data = df_prediction,
+                aes(x = time, y = qsup95, group = replicate),
+                color = "orange", linetype = 2)
+  }
+  if(spaghetti != TRUE){
+    plt <- plt + 
+      geom_ribbon(data = df_prediction,
+                  aes(x = time, ymin = qinf95, ymax = qsup95, group = replicate),
+                  fill = "grey", alpha = 0.4)
+  }
+  
+  # Prediction
+  plt <- plt +
+    geom_line(data = df_prediction,
+              aes(x = time, y = q50, group = replicate),
+              col="orange", size = 1)
+    
+  # facetting
+  if(one.plot == FALSE){
+    plt <- plt + facet_wrap(~ replicate, scales = scales)
+  }
+  
+  return(plt)
+}
+
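# A minimal standalone sketch of the binomial confidence interval attached to
# the observed data in plot.survFitVarExp() above; 'obs' is a hypothetical
# stand-in for df_observation. binom.test() returns a two-sided 95%
# Clopper-Pearson interval by default.
obs <- data.frame(time  = c(1, 2, 3),
                  Nsurv = c(20, 18, 15),
                  Ninit = 20)

# one interval per row; apply() yields a 2 x nrow(obs) matrix (lower, upper)
ci <- apply(obs, 1, function(row) {
  binom.test(as.numeric(row["Nsurv"]), as.numeric(row["Ninit"]))$conf.int
})
obs$conf_int_qinf95 <- ci[1, ]   # lower bounds
obs$conf_int_qsup95 <- ci[2, ]   # upper bounds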
diff --git a/R/plotDoseResponse.reproData.R b/R/plotDoseResponse.reproData.R
index 867e42e06f23a9c0c20ec1205eb074964d3839fb..b99c991de871691cc39ebef1f7c7227ce55f36a2 100644
--- a/R/plotDoseResponse.reproData.R
+++ b/R/plotDoseResponse.reproData.R
@@ -1,200 +1,187 @@
-#' Plot dose-response from \code{reproData} objects
-#'
-#' This is the generic \code{plotDoseResponse} S3 method for the \code{reproData}
-#' class. It plots the number of offspring per individual-days as a function of
-#' concentration at a given target time.
-#' 
-#' The function plots the observed values of the reproduction rate (number of
-#' reproduction outputs per individual-day) at a given time point as a function of
-#' concentration. The 95 \% Poisson confidence interval is added to each reproduction
-#' rate. It is calculated using function \code{\link[epitools]{pois.exact}}
-#' from package \code{epitools}.
-#' As replicates are not pooled in this plot, overlapped points are shifted on
-#' the x-axis to help the visualization of replicates.
-#'
-#' @param x an object of class \code{reproData}
-#' @param xlab a label for the \eqn{X}-axis, by default \code{Concentration}
-#' @param ylab a label for the \eqn{Y}-axis, by default \code{Nb of offspring per ind.day}
-#' @param main main title for the plot
-#' @param ylim \eqn{Y}-axis limits
-#' @param target.time a numeric value corresponding to some observed time points in \code{data}
-#' @param style graphical backend, can be \code{'ggplot'} or \code{'generic'}
-#' @param log.scale if \code{TRUE}, displays \eqn{X}-axis in log-scale
-#' @param remove.someLabels if \code{TRUE}, removes 75\% of \eqn{X}-axis labels in
-#' \code{'ggplot'} style to avoid the label overlap
-#' @param axis if \code{TRUE} displays ticks and label axis
-#' @param addlegend if \code{TRUE}, adds a default legend to the plot
-#' @param \dots Further arguments to be passed to generic methods
-#' 
-#' @note When \code{style = "generic"}, the function calls the generic function
-#' \code{\link[graphics]{plot}}
-#' @note When \code{style = "ggplot"}, the function return an object of class
-#' \code{ggplot}, see function \code{\link[ggplot2]{ggplot}} 
-#' 
-#' @seealso \code{\link[epitools]{pois.exact}}
-#'
-#' @keywords plot
-#' 
-#' @return a plot of class \code{ggplot}
-#'
-#' @examples
-#' # (1) Load the data
-#' data(zinc)
-#' 
-#' # (2) Create an object of class 'reproData'
-#' zinc_rpr <- reproData(zinc)
-#'
-#' # (3) Plot dose-response
-#' plotDoseResponse(zinc_rpr)
-#'
-#' # (4) Plot dose-response with a generic style
-#' plotDoseResponse(zinc_rpr, style = "generic")
-#'
-#' @import ggplot2
-#' @import grDevices
-#' @importFrom dplyr filter
-#' @importFrom graphics plot axis lines points
-#' title
-#' @importFrom methods is
-#' @importFrom stats aggregate
-#' @importFrom epitools pois.exact
-#'
-#' @export
-plotDoseResponse.reproData <- function(x,
-                                       xlab = "Concentration",
-                                       ylab = "Nb of offspring per ind.day",
-                                       main = NULL,
-                                       ylim = NULL,
-                                       target.time = NULL,
-                                       style = "ggplot",
-                                       log.scale = FALSE,
-                                       remove.someLabels = FALSE,
-                                       axis = TRUE,
-                                       addlegend = TRUE,
-                                       ...) {
-  if (is.null(target.time)) target.time <- max(x$time)
-  
-  if (!target.time %in% x$time || target.time == 0)
-    stop("[target.time] is not one of the possible time !")
-  
-  if (style == "generic" && remove.someLabels)
-    warning("'remove.someLabels' argument is valid only in 'ggplot' style.",
-            call. = FALSE)
-  
-  x$resp <- x$Nreprocumul / x$Nindtime
-  
-  # select the target.time
-  xf <- filter(x, x$time == target.time)
-  
-  # Selection of datapoints that can be displayed given the type of scale
-  sel <- if(log.scale) xf$conc > 0 else TRUE
-  x <- xf[sel, ]
-  transf_data_conc <- optLogTransform(log.scale, x$conc)
-  
-  # Concentration values used for display in linear scale
-  display.conc <- (function() {
-    x <- optLogTransform(log.scale, x$conc)
-    if(log.scale) exp(x) else x
-  })()
-  
-  ICpois <- pois.exact(x$Nreprocumul, x$Nindtime)
-  x$reproRateInf <- ICpois$lower
-  x$reproRateSup <- ICpois$upper
-  conc_val <- unique(transf_data_conc)
-  x$Obs <- x$conc
-  stepX <- stepCalc(conc_val)$stepX
-  jittered_conc <- jitterObsGenerator(stepX, x, conc_val)$jitterObs
-  
-  if (style == "generic")
-    reproDoseResponseCIGeneric(x, conc_val, jittered_conc, transf_data_conc,
-                               display.conc, ylim, axis, main, addlegend)
-  else if (style == "ggplot")
-    reproDoseResponseCIGG(x, conc_val, jittered_conc, transf_data_conc,
-                          display.conc, main, addlegend, remove.someLabels)
-  else stop("Unknown style")
-}
-
-reproDoseResponseCIGeneric <- function(x, conc_val, jittered_conc,
-                                       transf_data_conc, display.conc, ylim,
-                                       axis, main, addlegend) {
-  
-  x <- as.data.frame(x)
-  if (is.null(ylim)) ylim <- c(0, max(x$reproRateSup))
-  plot(jittered_conc, x$resp,
-       ylim = ylim,
-       type = "n",
-       xaxt = "n",
-       yaxt = "n",
-       main = main,
-       xlab = if (axis) { "Concentration" } else "",
-       ylab = if (axis) { "Reproduction rate"} else "")
-  
-  # axis
-  if (axis) {
-    axis(side = 2, at = pretty(c(0, max(x$resp))))
-    axis(side = 1, at = transf_data_conc,
-         labels = display.conc)
-  }
-  
-  x0 <- x[order(x$conc),]
-  segments(jittered_conc, x0[, "reproRateInf"],
-           jittered_conc, x0[, "reproRateSup"])
-  
-  points(jittered_conc, x0$resp, pch = 20)
-  
-  if (addlegend) {
-    legend("bottomleft", pch = c(20, NA),
-           lty = c(NA, 1),
-           lwd = c(NA, 1),
-           col = c(1, 1),
-           legend = c("Observed values", "Confidence intervals"),
-           bty = "n")
-  }
-}
-
-reproDoseResponseCIGG <- function(x, conc_val, jittered_conc, transf_data_conc,
-                                  display.conc, main, addlegend,
-                                  remove.someLabels) {
-  
-  x0 <- cbind(x[order(x$conc),], jittered_conc = as.vector(jittered_conc))
-  
-  df <- data.frame(x0,
-                   transf_data_conc,
-                   display.conc,
-                   Points = "Observed values")
-  
-  dfCI <- data.frame(x0,
-                     transf_data_conc,
-                     display.conc,
-                     Conf.Int = "Confidence intervals")
-  
-  # colors
-  valCols <- fCols(df, fitcol = NA, cicol = NA)
-  
-  gf <- ggplot(dfCI) + geom_segment(aes(x = jittered_conc, xend = jittered_conc,
-                                        y = reproRateInf, yend = reproRateSup,
-                                        linetype = Conf.Int),
-                                    data = dfCI,
-                                    col = valCols$cols3) +
-    geom_point(aes(x = jittered_conc, y = resp, fill = Points), df,
-               col = valCols$cols1) +
-    scale_fill_hue("") +
-    scale_linetype(name = "") +
-    expand_limits(y = 0) +
-    ggtitle(main) +
-    labs(x = "Concentration", y = "Reproduction rate") +
-    scale_x_continuous(breaks = unique(transf_data_conc),
-                       labels = if (remove.someLabels) {
-                         exclude_labels(unique(display.conc))
-                       } else {
-                         unique(display.conc)
-                       }
-    ) +
-    theme_minimal()
-  
-  if (addlegend) {
-    gf
-  } else {
-    gf + theme(legend.position = "none") # remove legend
-  }
-}
+#' Plot dose-response from \code{reproData} objects
+#'
+#' This is the generic \code{plotDoseResponse} S3 method for the \code{reproData}
+#' class. It plots the number of offspring per individual-day as a function of
+#' concentration at a given target time.
+#' 
+#' The function plots the observed values of the reproduction rate (number of
+#' reproduction outputs per individual-day) at a given time point as a function of
+#' concentration. The 95\% Poisson confidence interval is added to each reproduction
+#' rate. It is calculated using function \code{\link[epitools]{pois.exact}}
+#' from package \code{epitools}.
+#' As replicates are not pooled in this plot, overlapped points are shifted on
+#' the \eqn{X}-axis to ease the visualization of replicates.
+#'
+#' @param x an object of class \code{reproData}
+#' @param xlab a label for the \eqn{X}-axis, by default \code{Concentration}
+#' @param ylab a label for the \eqn{Y}-axis, by default \code{Nb of offspring per ind.day}
+#' @param main main title for the plot
+#' @param ylim \eqn{Y}-axis limits
+#' @param target.time a numeric value corresponding to some observed time points in \code{data}
+#' @param style graphical backend, can be \code{'ggplot'} or \code{'generic'}
+#' @param log.scale if \code{TRUE}, displays \eqn{X}-axis in log-scale
+#' @param remove.someLabels if \code{TRUE}, removes 75\% of \eqn{X}-axis labels in
+#' \code{'ggplot'} style to avoid label overlap
+#' @param axis if \code{TRUE}, displays axis ticks and labels
+#' @param addlegend if \code{TRUE}, adds a default legend to the plot
+#' @param \dots Further arguments to be passed to generic methods
+#' 
+#' @note When \code{style = "generic"}, the function calls the generic function
+#' \code{\link[graphics]{plot}}
+#' @note When \code{style = "ggplot"}, the function return an object of class
+#' \code{ggplot}, see function \code{\link[ggplot2]{ggplot}} 
+#' 
+#' @seealso \code{\link[epitools]{pois.exact}}
+#'
+#' @keywords plot
+#' 
+#' @return a plot of class \code{ggplot}
+#'
+#' @import ggplot2
+#' @import grDevices
+#' @importFrom dplyr filter
+#' @importFrom graphics plot axis lines points
+#' title
+#' @importFrom methods is
+#' @importFrom stats aggregate
+#' @importFrom epitools pois.exact
+#'
+#' @export
+plotDoseResponse.reproData <- function(x,
+                                       xlab = "Concentration",
+                                       ylab = "Nb of offspring per ind.day",
+                                       main = NULL,
+                                       ylim = NULL,
+                                       target.time = NULL,
+                                       style = "ggplot",
+                                       log.scale = FALSE,
+                                       remove.someLabels = FALSE,
+                                       axis = TRUE,
+                                       addlegend = TRUE,
+                                       ...) {
+  if (is.null(target.time)) target.time <- max(x$time)
+  
+  if (!target.time %in% x$time || target.time == 0)
+    stop("[target.time] is not one of the possible time !")
+  
+  if (style == "generic" && remove.someLabels)
+    warning("'remove.someLabels' argument is valid only in 'ggplot' style.",
+            call. = FALSE)
+  
+  x$resp <- x$Nreprocumul / x$Nindtime
+  
+  # select the target.time
+  xf <- filter(x, x$time == target.time)
+  
+  # Selection of datapoints that can be displayed given the type of scale
+  sel <- if(log.scale) xf$conc > 0 else TRUE
+  x <- xf[sel, ]
+  transf_data_conc <- optLogTransform(log.scale, x$conc)
+  
+  # Concentration values used for display in linear scale
+  display.conc <- (function() {
+    x <- optLogTransform(log.scale, x$conc)
+    if(log.scale) exp(x) else x
+  })()
+  
+  ICpois <- pois.exact(x$Nreprocumul, x$Nindtime)
+  x$reproRateInf <- ICpois$lower
+  x$reproRateSup <- ICpois$upper
+  conc_val <- unique(transf_data_conc)
+  x$Obs <- x$conc
+  stepX <- stepCalc(conc_val)$stepX
+  jittered_conc <- jitterObsGenerator(stepX, x, conc_val)$jitterObs
+  
+  if (style == "generic")
+    reproDoseResponseCIGeneric(x, conc_val, jittered_conc, transf_data_conc,
+                               display.conc, ylim, axis, main, addlegend)
+  else if (style == "ggplot")
+    reproDoseResponseCIGG(x, conc_val, jittered_conc, transf_data_conc,
+                          display.conc, main, addlegend, remove.someLabels)
+  else stop("Unknown style")
+}
+
+reproDoseResponseCIGeneric <- function(x, conc_val, jittered_conc,
+                                       transf_data_conc, display.conc, ylim,
+                                       axis, main, addlegend) {
+  
+  x <- as.data.frame(x)
+  if (is.null(ylim)) ylim <- c(0, max(x$reproRateSup))
+  plot(jittered_conc, x$resp,
+       ylim = ylim,
+       type = "n",
+       xaxt = "n",
+       yaxt = "n",
+       main = main,
+       xlab = if (axis) { "Concentration" } else "",
+       ylab = if (axis) { "Reproduction rate" } else "")
+  
+  # axis
+  if (axis) {
+    axis(side = 2, at = pretty(c(0, max(x$resp))))
+    axis(side = 1, at = transf_data_conc,
+         labels = display.conc)
+  }
+  
+  x0 <- x[order(x$conc),]
+  segments(jittered_conc, x0[, "reproRateInf"],
+           jittered_conc, x0[, "reproRateSup"])
+  
+  points(jittered_conc, x0$resp, pch = 20)
+  
+  if (addlegend) {
+    legend("bottomleft", pch = c(20, NA),
+           lty = c(NA, 1),
+           lwd = c(NA, 1),
+           col = c(1, 1),
+           legend = c("Observed values", "Confidence intervals"),
+           bty = "n")
+  }
+}
+
+reproDoseResponseCIGG <- function(x, conc_val, jittered_conc, transf_data_conc,
+                                  display.conc, main, addlegend,
+                                  remove.someLabels) {
+  
+  x0 <- cbind(x[order(x$conc),], jittered_conc = as.vector(jittered_conc))
+  
+  df <- data.frame(x0,
+                   transf_data_conc,
+                   display.conc,
+                   Points = "Observed values")
+  
+  dfCI <- data.frame(x0,
+                     transf_data_conc,
+                     display.conc,
+                     Conf.Int = "Confidence intervals")
+  
+  # colors
+  valCols <- fCols(df, fitcol = NA, cicol = NA)
+  
+  gf <- ggplot(dfCI) + geom_segment(aes(x = jittered_conc, xend = jittered_conc,
+                                        y = reproRateInf, yend = reproRateSup,
+                                        linetype = Conf.Int),
+                                    data = dfCI,
+                                    col = valCols$cols3) +
+    geom_point(aes(x = jittered_conc, y = resp, fill = Points), df,
+               col = valCols$cols1) +
+    scale_fill_hue("") +
+    scale_linetype(name = "") +
+    expand_limits(y = 0) +
+    ggtitle(main) +
+    labs(x = "Concentration", y = "Reproduction rate") +
+    scale_x_continuous(breaks = unique(transf_data_conc),
+                       labels = if (remove.someLabels) {
+                         exclude_labels(unique(display.conc))
+                       } else {
+                         unique(display.conc)
+                       }
+    ) +
+    theme_minimal()
+  
+  if (addlegend) {
+    gf
+  } else {
+    gf + theme(legend.position = "none") # remove legend
+  }
+}
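# A minimal sketch of the exact Poisson interval used above for reproduction
# rates, assuming the 'epitools' package is installed; the counts and
# individual-days below are made-up values, not data from the package.
library(epitools)

Nreprocumul <- c(120, 95, 40)  # cumulated offspring per treatment
Nindtime    <- c(60, 58, 55)   # individual-days of observation

# pois.exact() returns a data frame with the rate and its 95% bounds
ICpois <- pois.exact(Nreprocumul, Nindtime)
cbind(rate  = Nreprocumul / Nindtime,
      lower = ICpois$lower,
      upper = ICpois$upper)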
diff --git a/R/plotDoseResponse.survDataCstExp.R b/R/plotDoseResponse.survDataCstExp.R
index 48fc96784948cee3d67d59703c0cdab5b117e1d9..a924d2c87329f96e8108e4c3dcace9517e36e39d 100644
--- a/R/plotDoseResponse.survDataCstExp.R
+++ b/R/plotDoseResponse.survDataCstExp.R
@@ -1,206 +1,190 @@
-#' Plot dose-response from \code{survData} objects
-#'
-#' This is the generic \code{plotDoseResponse} S3 method for the \code{survData}
-#' class. It plots the survival probability as a function of concentration at a given
-#' target time.
-#' 
-#' The function plots the observed values of the survival probability at a given time point
-#' as a function of concentration. The 95 \% binomial confidence interval is added
-#' to each survival probability. It is calculated using function
-#' \code{\link[stats]{binom.test}} from package \code{stats}.
-#' Replicates are systematically pooled in this plot.
-#'
-#' @param x an object of class \code{survData}
-#' @param xlab a label for the \eqn{X}-axis, by default \code{Concentration}
-#' @param ylab a label for the \eqn{Y}-axis, by default \code{Survival probability}
-#' @param main main title for the plot
-#' @param target.time a numeric value corresponding to some observed time in \code{data}
-#' @param style graphical backend, can be \code{'ggplot'} or \code{'generic'}
-#' @param log.scale if \code{TRUE}, displays \eqn{X}-axis in log-scale
-#' @param remove.someLabels if \code{TRUE}, removes 75\% of X-axis labels in
-#' \code{'ggplot'} style to avoid the label overlap
-#' @param addlegend if \code{TRUE}, adds a default legend to the plot
-#' @param \dots Further arguments to be passed to generic methods
-#' 
-#' @note When \code{style = "generic"}, the function calls the generic function
-#' \code{\link[graphics]{plot}}
-#' @note When \code{style = "ggplot"}, the function return an object of class
-#'  \code{ggplot}, see function \code{\link[ggplot2]{ggplot}} 
-#' 
-#' 
-#' @seealso \code{\link[stats]{binom.test}}
-#'
-#' @keywords plot
-#' 
-#' @return a plot of class \code{ggplot}
-#'
-#' @examples
-#'
-#' library(ggplot2)
-#'
-#' # (1) Load the data
-#' data(zinc)
-#' 
-#' # (2) Create an object of class 'survData'
-#' zinc <- survData(zinc)
-#'
-#' # (3) Plot dose-response
-#' plotDoseResponse(zinc)
-#'
-#' # (4) Plot dose-respo nse with a generic style
-#' plotDoseResponse(zinc, style = "generic")
-#'
-#' @import ggplot2
-#' @import grDevices
-#' @importFrom dplyr filter
-#' @importFrom grid arrow unit
-#' @importFrom graphics plot axis lines points segments title
-#' @importFrom methods is
-#' @importFrom stats aggregate
-#'
-#' @export
-plotDoseResponse.survDataCstExp <- function(x,
-                                            xlab = "Concentration",
-                                            ylab = "Survival probability",
-                                            main = NULL,
-                                            target.time = NULL,
-                                            style = "ggplot",
-                                            log.scale = FALSE,
-                                            remove.someLabels = FALSE,
-                                            addlegend = TRUE,
-                                            ...) {
-  if (is.null(target.time)) target.time <- max(x$time)
-  
-  if (!target.time %in% x$time || target.time == 0)
-    stop("[target.time] is not one of the possible time !")
-  
-  if (style == "generic" && remove.someLabels)
-    warning("'remove.someLabels' argument is valid only in 'ggplot' style.",
-            call. = FALSE)
-
-  # agregate by sum of replicate
-  x <- cbind(aggregate(cbind(Nsurv, Ninit) ~ time + conc, x, sum),
-             replicate = 1)
-  
-  x$resp <- x$Nsurv / x$Ninit
-  # select the target.time
-  xf <- filter(x, x$time == target.time)
-  
-  conf.int <- survConfInt(xf, log.scale)
-  
-  # Selection of datapoints that can be displayed given the type of scale
-  sel <- if(log.scale) xf$conc > 0 else TRUE
-  x <- xf[sel, ]
-  transf_data_conc <- optLogTransform(log.scale, x$conc)
-  
-  # Concentration values used for display in linear scale
-  display.conc <- (function() {
-    x <- optLogTransform(log.scale, x$conc)
-    if(log.scale) exp(x) else x
-  })()
-  
-  # vector color
-  x$color <- as.numeric(as.factor(x$replicate))
-  
-  if (style == "generic") {
-    plot(transf_data_conc, seq(0, max(conf.int["qsup95",]),
-                               length.out = length(transf_data_conc)),
-         type = "n",
-         xaxt = "n",
-         yaxt = "n",
-         main = main,
-         xlab = xlab,
-         ylab = ylab)
-    
-    axis(side = 1, at = transf_data_conc,
-         labels = display.conc)
-    axis(side = 2, at = unique(round(pretty(c(0, max(x$resp))))),
-         labels = unique(round(pretty(c(0, max(x$resp))))))
-    
-    # points
-    points(transf_data_conc, x$resp,
-           pch = 20)
-    
-    # segment CI
-    
-    segments(transf_data_conc, x$resp,
-             transf_data_conc, conf.int["qsup95", ])
-    
-    segments(transf_data_conc, x$resp,
-             transf_data_conc, conf.int["qinf95", ])
-    
-    # add legend
-    if (addlegend) {
-      legend("bottomleft", pch = c(20, NA),
-             lty = c(NA, 1),
-             lwd = c(NA, 1),
-             col = c(1, 1),
-             legend = c("Observed values", "Confidence intervals"),
-             bty = "n")
-    }
-  }
-  else if (style == "ggplot") {
-    # colors
-    valCols <- fCols(x, fitcol = NA, cicol = NA)
-    
-    df <- data.frame(x,
-                     transf_data_conc,
-                     display.conc,
-                     Points = "Observed values")
-    dfCI <- data.frame(conc = transf_data_conc,
-                       qinf95 = conf.int["qinf95",],
-                       qsup95 = conf.int["qsup95",],
-                       Conf.Int = "Confidence intervals")
-    
-    fd <- ggplot(df) +
-      geom_point(aes(x = transf_data_conc, y = resp, fill = Points),
-                 data = df, col = valCols$cols1) +
-      geom_segment(aes(x = conc, xend = conc, y = qinf95,
-                       yend = qsup95,
-                       linetype = Conf.Int),
-                   dfCI, col = valCols$cols3) +
-      scale_fill_hue("") +
-      scale_linetype(name = "") +
-      expand_limits(x = 0, y = 0) + ggtitle(main) +
-      theme_minimal() +
-      labs(x = xlab,
-           y = ylab) +
-      scale_x_continuous(breaks = unique(df$transf_data_conc),
-                         labels = if (remove.someLabels) {
-                           exclude_labels(unique(df$display.conc))
-                         } else {
-                           unique(df$display.conc)
-                         }
-      ) +
-      scale_y_continuous(breaks = unique(round(pretty(c(0, max(df$resp)))))) +
-      expand_limits(x = 0, y = 0)
-    
-if (addlegend) {
-  fd
-} else {
-  fd + theme(legend.position = "none") # remove legend
-}
-  }
-  else stop("Unknown plot style")
-}
-
-#' @importFrom stats aggregate binom.test
-survConfInt <- function(x, log.scale) {
-  # compute confidente interval on observed data
-  # binomial model by a binomial test
-  # INPUT:
-  # - x : object of class survFitTT
-  # - log.scale : boolean
-  # OUTPUT:
-  
-  # - ci : confidente interval
-  ci <- apply(x, 1, function(x) {
-    binom.test(x["Nsurv"], x["Ninit"])$conf.int
-  })
-  rownames(ci) <- c("qinf95", "qsup95")
-  colnames(ci) <- x$conc
-  
-  if (log.scale) ci <- ci[ ,colnames(ci) != 0]
-  
-  return(ci)
-}
+#' Plot dose-response from \code{survData} objects
+#'
+#' This is the generic \code{plotDoseResponse} S3 method for the \code{survData}
+#' class. It plots the survival probability as a function of concentration at a given
+#' target time.
+#' 
+#' The function plots the observed values of the survival probability at a given time point
+#' as a function of concentration. The 95\% binomial confidence interval is added
+#' to each survival probability. It is calculated using function
+#' \code{\link[stats]{binom.test}} from package \code{stats}.
+#' Replicates are systematically pooled in this plot.
+#'
+#' @param x an object of class \code{survData}
+#' @param xlab a label for the \eqn{X}-axis, by default \code{Concentration}
+#' @param ylab a label for the \eqn{Y}-axis, by default \code{Survival probability}
+#' @param main main title for the plot
+#' @param target.time a numeric value corresponding to an observed time point in \code{data}
+#' @param style graphical backend, can be \code{'ggplot'} or \code{'generic'}
+#' @param log.scale if \code{TRUE}, displays \eqn{X}-axis in log-scale
+#' @param remove.someLabels if \code{TRUE}, removes 75\% of \eqn{X}-axis labels in
+#' \code{'ggplot'} style to avoid label overlap
+#' @param addlegend if \code{TRUE}, adds a default legend to the plot
+#' @param \dots Further arguments to be passed to generic methods
+#' 
+#' @note When \code{style = "generic"}, the function calls the generic function
+#' \code{\link[graphics]{plot}}
+#' @note When \code{style = "ggplot"}, the function returns an object of class
+#'  \code{ggplot}, see function \code{\link[ggplot2]{ggplot}}
+#' 
+#' @seealso \code{\link[stats]{binom.test}}
+#'
+#' @keywords plot
+#' 
+#' @return a plot of class \code{ggplot}
+#'
+#'
+#' @import ggplot2
+#' @import grDevices
+#' @importFrom dplyr filter
+#' @importFrom grid arrow unit
+#' @importFrom graphics plot axis lines points segments title
+#' @importFrom methods is
+#' @importFrom stats aggregate
+#'
+#' @export
+plotDoseResponse.survDataCstExp <- function(x,
+                                            xlab = "Concentration",
+                                            ylab = "Survival probability",
+                                            main = NULL,
+                                            target.time = NULL,
+                                            style = "ggplot",
+                                            log.scale = FALSE,
+                                            remove.someLabels = FALSE,
+                                            addlegend = TRUE,
+                                            ...) {
+  if (is.null(target.time)) target.time <- max(x$time)
+  
+  if (!target.time %in% x$time || target.time == 0)
+    stop("[target.time] is not one of the possible time !")
+  
+  if (style == "generic" && remove.someLabels)
+    warning("'remove.someLabels' argument is valid only in 'ggplot' style.",
+            call. = FALSE)
+
+  # aggregate by summing over replicates
+  x <- cbind(aggregate(cbind(Nsurv, Ninit) ~ time + conc, x, sum),
+             replicate = 1)
+  
+  x$resp <- x$Nsurv / x$Ninit
+  # select the target.time
+  xf <- filter(x, x$time == target.time)
+  
+  conf.int <- survConfInt(xf, log.scale)
+  
+  # Selection of datapoints that can be displayed given the type of scale
+  sel <- if(log.scale) xf$conc > 0 else TRUE
+  x <- xf[sel, ]
+  transf_data_conc <- optLogTransform(log.scale, x$conc)
+  
+  # Concentration values used for display in linear scale
+  display.conc <- (function() {
+    x <- optLogTransform(log.scale, x$conc)
+    if(log.scale) exp(x) else x
+  })()
+  
+  # vector color
+  x$color <- as.numeric(as.factor(x$replicate))
+  
+  if (style == "generic") {
+    plot(transf_data_conc, seq(0, max(conf.int["qsup95",]),
+                               length.out = length(transf_data_conc)),
+         type = "n",
+         xaxt = "n",
+         yaxt = "n",
+         main = main,
+         xlab = xlab,
+         ylab = ylab)
+    
+    axis(side = 1, at = transf_data_conc,
+         labels = display.conc)
+    axis(side = 2, at = unique(round(pretty(c(0, max(x$resp))))),
+         labels = unique(round(pretty(c(0, max(x$resp))))))
+    
+    # points
+    points(transf_data_conc, x$resp,
+           pch = 20)
+    
+    # segment CI
+    
+    segments(transf_data_conc, x$resp,
+             transf_data_conc, conf.int["qsup95", ])
+    
+    segments(transf_data_conc, x$resp,
+             transf_data_conc, conf.int["qinf95", ])
+    
+    # add legend
+    if (addlegend) {
+      legend("bottomleft", pch = c(20, NA),
+             lty = c(NA, 1),
+             lwd = c(NA, 1),
+             col = c(1, 1),
+             legend = c("Observed values", "Confidence intervals"),
+             bty = "n")
+    }
+  }
+  else if (style == "ggplot") {
+    # colors
+    valCols <- fCols(x, fitcol = NA, cicol = NA)
+    
+    df <- data.frame(x,
+                     transf_data_conc,
+                     display.conc,
+                     Points = "Observed values")
+    dfCI <- data.frame(conc = transf_data_conc,
+                       qinf95 = conf.int["qinf95",],
+                       qsup95 = conf.int["qsup95",],
+                       Conf.Int = "Confidence intervals")
+    
+    fd <- ggplot(df) +
+      geom_point(aes(x = transf_data_conc, y = resp, fill = Points),
+                 data = df, col = valCols$cols1) +
+      geom_segment(aes(x = conc, xend = conc, y = qinf95,
+                       yend = qsup95,
+                       linetype = Conf.Int),
+                   dfCI, col = valCols$cols3) +
+      scale_fill_hue("") +
+      scale_linetype(name = "") +
+      expand_limits(x = 0, y = 0) + ggtitle(main) +
+      theme_minimal() +
+      labs(x = xlab,
+           y = ylab) +
+      scale_x_continuous(breaks = unique(df$transf_data_conc),
+                         labels = if (remove.someLabels) {
+                           exclude_labels(unique(df$display.conc))
+                         } else {
+                           unique(df$display.conc)
+                         }
+      ) +
+      scale_y_continuous(breaks = unique(round(pretty(c(0, max(df$resp)))))) +
+      expand_limits(x = 0, y = 0)
+    
+    if (addlegend) {
+      fd
+    } else {
+      fd + theme(legend.position = "none") # remove legend
+    }
+  }
+  else stop("Unknown plot style")
+}
+
+#' @importFrom stats aggregate binom.test
+survConfInt <- function(x, log.scale) {
+  # Compute a confidence interval on the observed data, assuming a binomial
+  # model, via a binomial test.
+  # INPUT:
+  # - x : a data frame with columns Nsurv, Ninit and conc
+  # - log.scale : boolean
+  # OUTPUT:
+  # - ci : confidence interval (rows qinf95/qsup95, one column per concentration)
+  ci <- apply(x, 1, function(x) {
+    binom.test(x["Nsurv"], x["Ninit"])$conf.int
+  })
+  rownames(ci) <- c("qinf95", "qsup95")
+  colnames(ci) <- x$conc
+  
+  if (log.scale) ci <- ci[, colnames(ci) != 0]
+  
+  return(ci)
+}
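# A minimal sketch of the replicate pooling and binomial interval performed by
# plotDoseResponse.survDataCstExp() and survConfInt() above; 'dat' is a
# hypothetical survData-like data frame.
dat <- data.frame(replicate = rep(c("A", "B"), each = 3),
                  conc      = rep(c(0, 5, 10), times = 2),
                  time      = 4,
                  Ninit     = 20,
                  Nsurv     = c(20, 15, 8, 19, 14, 9))

# pool replicates by summing survivors and initial counts per concentration
pooled <- aggregate(cbind(Nsurv, Ninit) ~ time + conc, dat, sum)

# one exact binomial interval per pooled concentration
ci <- apply(pooled, 1, function(row) {
  binom.test(row["Nsurv"], row["Ninit"])$conf.int
})
pooled$qinf95 <- ci[1, ]
pooled$qsup95 <- ci[2, ]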
diff --git a/R/ppc.reproFitTT.R b/R/ppc.reproFitTT.R
index 699bac79aa86686367cfbd397e121bc7924c58e7..9c3814666232911ed6b557edcdd29b60cd43b9ef 100644
--- a/R/ppc.reproFitTT.R
+++ b/R/ppc.reproFitTT.R
@@ -1,104 +1,87 @@
-#' Posterior predictive check plot for \code{reproFitTT} objects
-#'
-#' This is the generic \code{ppc} S3 method for the \code{reproFitTT} class.
-#' It plots the predicted values with 95\% credible intervals versus the observed
-#' values.
-#' 
-#' The coordinates of black points are the observed values of the cumulated number
-#' of reproduction outputs for a given concentration (\eqn{X}-scale) and the corresponding 
-#' predicted values (\eqn{Y}-scale). 95\% prediction intervals are added to each predicted
-#' value, colored in green if this interval contains the observed value and in red
-#' in the other case. As replicates are not pooled in this plot, overlapped points
-#' are shifted on the \eqn{X-}axis to help the visualization of replicates. The bisecting
-#' line (y = x) is added to the plot in order to see if each prediction interval
-#' contains each observed value. As replicates are shifted on the \eqn{X}-axis, this
-#' line may be represented by steps.
-#'
-#' @rdname PPC
-#' 
-#' @param x An object of class \code{reproFitTT}
-#' @param xlab A label for the \eqn{X}-axis, by default \code{Observed Cumul. Nbr. of offspring}
-#' @param ylab A label for the \eqn{Y}-axis, by default \code{Predicted Cumul. Nbr. of offspring}
-#' @param style graphical backend, can be \code{'generic'} or \code{'ggplot'}
-#' @param main main title for the plot
-#' @param \dots Further arguments to be passed to generic methods
-#' 
-#' @return a plot of class \code{ggplot}
-#'
-#' @examples
-#'
-#' # (1) Load the data
-#' data(cadmium1)
-#'
-#' # (2) Create an object of class "reproData"
-#' dataset <- reproData(cadmium1)
-#'
-#' \donttest{
-#' # (3) Run the reproFitTT function with the log-logistic gamma-Poisson model
-#' out <- reproFitTT(dataset, stoc.part = "gammapoisson",
-#' ecx = c(5, 10, 15, 20, 30, 50, 80), quiet = TRUE)
-#'
-#' # (4) Plot observed versus predicted values
-#' ppc(out)
-#' }
-#'
-#' @import ggplot2
-#' @import grDevices
-#' @importFrom graphics plot
-#'
-#' @export
-ppc.reproFitTT <- function(x,
-                           style = "ggplot", 
-                           xlab = "Observed Cumul. Nbr. of offspring",
-                           ylab = "Predicted Cumul. Nbr. of offspring",
-                           main = NULL, ...) {
-  if (!is(x, "reproFitTT"))
-    stop("x is not of class 'reproFitTT'!")
-
-  ppc_gen(EvalreproPpc(x), style, xlab, ylab, main)
-}
-
-
-#' @importFrom stats rgamma rpois quantile
-EvalreproPpc <- function(x) {
-  tot.mcmc <- do.call("rbind", x$mcmc)
-
-  if (x$model.label == "GP") {
-    omega <- 10^tot.mcmc[, "log10omega"]
-  }
-  b <- 10^tot.mcmc[, "log10b"]
-  d <- tot.mcmc[, "d"]
-  e <- 10^tot.mcmc[, "log10e"]
-
-  niter <- nrow(tot.mcmc)
-  n <- x$jags.data$n
-  xconc <- x$jags.data$xconc
-  Nindtime <- x$jags.data$Nindtime
-  NcumulObs <- x$jags.data$Ncumul
-  NcumulPred <- matrix(NA, nrow = niter, ncol = n)
-
-  if (x$model.label == "GP") {
-    for (i in 1:n) {
-      popmean <- d / (1 + (xconc[i]/e)^b)
-      indmean <- rgamma(n = niter, shape = popmean / omega, rate = 1 / omega)
-      NcumulPred[, i] <- rpois(niter, indmean * Nindtime[i])
-    }
-
-  }
-  if (x$model.label == "P") {
-    for (i in 1:n) {
-      ytheo <- d / (1 + (xconc[i]/e)^b)
-      nbtheo <- ytheo * Nindtime[i]
-      NcumulPred[, i] <- rpois(niter, nbtheo)
-    }
-  }
-  QNreproPred <- t(apply(NcumulPred, 2, quantile,
-                         probs = c(2.5, 50, 97.5) / 100))
-  tab <- data.frame(QNreproPred,
-                    Nindtime, NcumulObs,
-                    col = ifelse(QNreproPred[,"2.5%"] > NcumulObs | QNreproPred[,"97.5%"] < NcumulObs,
-                                 "red", "green"))
-  colnames(tab) <- c("P2.5", "P50", "P97.5", "Nindtime", "Obs", "col")
-
-  return(tab)
-}
+#' Posterior predictive check plot for \code{reproFitTT} objects
+#'
+#' This is the generic \code{ppc} S3 method for the \code{reproFitTT} class.
+#' It plots the predicted values with 95\% credible intervals versus the observed
+#' values.
+#' 
+#' The coordinates of black points are the observed values of the cumulated number
+#' of reproduction outputs for a given concentration (\eqn{X}-scale) and the corresponding 
+#' predicted values (\eqn{Y}-scale). 95\% prediction intervals are added to each predicted
+#' value, colored in green if this interval contains the observed value and in red
+#' otherwise. As replicates are not pooled in this plot, overlapped points
+#' are shifted on the \eqn{X}-axis to ease the visualization of replicates. The bisecting
+#' line (y = x) is added to the plot in order to see if each prediction interval
+#' contains each observed value. As replicates are shifted on the \eqn{X}-axis, this
+#' line may be represented by steps.
+#'
+#' @rdname PPC
+#' 
+#' @param x An object of class \code{reproFitTT}
+#' @param xlab A label for the \eqn{X}-axis, by default \code{Observed Cumul. Nbr. of offspring}
+#' @param ylab A label for the \eqn{Y}-axis, by default \code{Predicted Cumul. Nbr. of offspring}
+#' @param style graphical backend, can be \code{'generic'} or \code{'ggplot'}
+#' @param main main title for the plot
+#' @param \dots Further arguments to be passed to generic methods
+#' 
+#' @return a plot of class \code{ggplot}
+#' 
+#' @import ggplot2
+#' @import grDevices
+#' @importFrom graphics plot
+#'
+#' @export
+ppc.reproFitTT <- function(x,
+                           style = "ggplot", 
+                           xlab = "Observed Cumul. Nbr. of offspring",
+                           ylab = "Predicted Cumul. Nbr. of offspring",
+                           main = NULL, ...) {
+  if (!is(x, "reproFitTT"))
+    stop("x is not of class 'reproFitTT'!")
+
+  ppc_gen(EvalreproPpc(x), style, xlab, ylab, main)
+}
+
+
+#' @importFrom stats rgamma rpois quantile
+EvalreproPpc <- function(x) {
+  tot.mcmc <- do.call("rbind", x$mcmc)
+
+  if (x$model.label == "GP") {
+    omega <- 10^tot.mcmc[, "log10omega"]
+  }
+  b <- 10^tot.mcmc[, "log10b"]
+  d <- tot.mcmc[, "d"]
+  e <- 10^tot.mcmc[, "log10e"]
+
+  niter <- nrow(tot.mcmc)
+  n <- x$jags.data$n
+  xconc <- x$jags.data$xconc
+  Nindtime <- x$jags.data$Nindtime
+  NcumulObs <- x$jags.data$Ncumul
+  NcumulPred <- matrix(NA, nrow = niter, ncol = n)
+
+  if (x$model.label == "GP") {
+    for (i in 1:n) {
+      popmean <- d / (1 + (xconc[i]/e)^b)
+      indmean <- rgamma(n = niter, shape = popmean / omega, rate = 1 / omega)
+      NcumulPred[, i] <- rpois(niter, indmean * Nindtime[i])
+    }
+
+  }
+  if (x$model.label == "P") {
+    for (i in 1:n) {
+      ytheo <- d / (1 + (xconc[i]/e)^b)
+      nbtheo <- ytheo * Nindtime[i]
+      NcumulPred[, i] <- rpois(niter, nbtheo)
+    }
+  }
+  QNreproPred <- t(apply(NcumulPred, 2, quantile,
+                         probs = c(2.5, 50, 97.5) / 100))
+  tab <- data.frame(QNreproPred,
+                    Nindtime, NcumulObs,
+                    col = ifelse(QNreproPred[,"2.5%"] > NcumulObs | QNreproPred[,"97.5%"] < NcumulObs,
+                                 "red", "green"))
+  colnames(tab) <- c("P2.5", "P50", "P97.5", "Nindtime", "Obs", "col")
+
+  return(tab)
+}
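# A minimal sketch of the gamma-Poisson predictive draw performed in
# EvalreproPpc() above, for a single concentration; the parameter values are
# illustrative constants, not fitted posterior draws.
set.seed(42)
niter    <- 1000
b <- 2; d <- 10; e <- 5   # log-logistic parameters (held fixed here)
omega    <- 0.5           # over-dispersion parameter of the gamma mixture
conc     <- 4
Nindtime <- 60

# population mean reproduction rate from the log-logistic concentration-effect curve
popmean <- d / (1 + (conc / e)^b)

# gamma-distributed individual rates (mean = popmean), then Poisson counts
indmean    <- rgamma(niter, shape = popmean / omega, rate = 1 / omega)
NcumulPred <- rpois(niter, indmean * Nindtime)
quantile(NcumulPred, probs = c(2.5, 50, 97.5) / 100)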
diff --git a/R/ppc.survFitCstExp.R b/R/ppc.survFitCstExp.R
index 4ced8e7618490478b641e35723fbce29963c6c5f..484f1338f93f3c2ccc49b5443cba0933cb2a79b6 100644
--- a/R/ppc.survFitCstExp.R
+++ b/R/ppc.survFitCstExp.R
@@ -1,158 +1,142 @@
-#' Posterior predictive check plot for \code{survFitCstExp} objects
-#'
-#' This is the generic \code{ppc} S3 method for the \code{survFitCstExp} class. It
-#' plots the predicted values along with 95\% credible intervals
-#' versus the observed values for \code{survFit} objects.
-#'
-#' The black points show the observed number of survivors (pooled
-#' replicates, on \eqn{X}-axis) against the corresponding predicted
-#' number (\eqn{Y}-axis). Predictions come along with 95\% prediction
-#' intervals, which are depicted in green when they contain the
-#' observed value and in red otherwise. Samples with equal observed
-#' value are shifted on the \eqn{X}-axis. For that reason, the
-#' bisecting line (y = x), is represented by steps when observed
-#' values are low. That way we ensure green intervals do intersect the
-#' bisecting line.
-#'
-#' @rdname PPC
-#' 
-#' @param x An object of class \code{survFitCstExp}
-#' @param style graphical backend, can be \code{'generic'} or \code{'ggplot'}
-#' @param main main title for the plot
-#' @param \dots Further arguments to be passed to generic methods
-#' 
-#' @return a plot of class \code{ggplot}
-#'
-#' @examples
-#'
-#' # (1) Load the data
-#' data(propiconazole)
-#'
-#' # (2) Create an object of class "survData"
-#' dataset <- survData(propiconazole)
-#'
-#' \donttest{
-#' # (3) Run the survFitTKTD function with the TKTD model ('SD' or 'IT')
-#' out <- survFit(dataset, model_type = "SD")
-#'
-#' # (4) Plot observed versus predicted values
-#' ppc(out)
-#' }
-#'
-#' @import ggplot2
-#' @import grDevices
-#' @importFrom graphics plot
-#'
-#' @export
-#' 
-ppc.survFitCstExp <- function(x, style = "ggplot", main = NULL, ...) {
-
-  xlab <- "Observed nb of survivors"
-  ylab <- "Predicted nb of survivors"
-  
-  ppc_gen(EvalsurvTKTDPpc_CstExp(x), style, xlab, ylab, main)
-}
-
-#' @importFrom stats rbinom quantile plogis
-EvalsurvTKTDPpc_CstExp <- function(x) {
-  tot.mcmc <- do.call("rbind", x$mcmc)
-
-  model_type <- x$model_type
-  
-  kd <- 10^(tot.mcmc[, "kd_log10"])
-  # "hb" is not in survFit object of morse <v3.2.0
-  if("hb" %in% colnames(tot.mcmc)){
-    hb <- tot.mcmc[, "hb"]  
-  } else{ hb <- 10^tot.mcmc[, "hb_log10"] }
-
-  if(model_type == "SD"){
-    z <- 10^(tot.mcmc[, "z_log10"])
-    kk <- 10^(tot.mcmc[, "kk_log10"])
-  } else if (model_type == "IT"){
-    alpha <- 10^(tot.mcmc[, "alpha_log10"])
-    beta <- 10^(tot.mcmc[, "beta_log10"])
-  } else{
-    stop("'model_type' must be 'SD' or 'IT'")
-  }
-  
-  #NsurvObs <- x$jags.data$y
-  NsurvObs <- x$jags.data$Nsurv
-  
-  #n <- x$jags.data$ndat
-  n <- x$jags.data$n_data
-  
-  #xconc <- x$jags.data$x
-  xconc <- x$jags.data$conc
-  
-  #t <- x$jags.data$t
-  time <- x$jags.data$time
-  
-  Nprec <- x$jags.data$Nprec
-  
-  if(model_type == "SD"){
-    
-    niter <- nrow(tot.mcmc)
-   
-    tprec <- x$jags.data$tprec
-    
-    NsurvPred <- matrix(NA, nrow = niter, ncol = n)
-    psurv = NULL
-    
-    # bigtime <- x$jags.data$bigtime
-    bigtime <- max(time) + 10
-    
-    for (i in 1:n) {
-      for (j in 1:length(kd)) {
-        xcor <- ifelse(xconc[i] > 0, xconc[i], 10)
-        R <- ifelse(xconc[i] > z[j], z[j]/xcor, 0.1)
-        tz <- ifelse(xconc[i] > z[j], -1 / kd[j] * log(1 - R), bigtime)
-        tref <- max(tprec[i], tz)
-        psurv[j] <- exp(-hb[j] * (time[i] - tprec[i]) +
-                          if (time[i] > tz) {
-                            -kk[j] * ((xconc[i] - z[j]) * (time[i] - tref) +
-                                        xconc[i]/kd[j] * (exp(-kd[j] * time[i]) - exp(-kd[j] * tref)))
-                          } else {
-                            0
-                          })
-      }
-      NsurvPred[, i] <- rbinom(niter, Nprec[i], psurv)
-    }
-    NsurvPred <- t(NsurvPred)
-  }
-  if(model_type == "IT"){
-    
-    D.max <- matrix(nrow = length(kd), ncol = length(xconc) )
-    for(j in 1:n){
-      # xconc[j] * (1-exp(-kd * time[j])) is always the max compared to previous time !
-      D.max[,j] <- xconc[j] * (1-exp(-kd * time[j])) 
-    }
-    dtheo.IT <-  exp(-hb %*% t(time)) * (1 - plogis(log(D.max), location = log(alpha), scale = 1/beta))
-    
-    # transpose dtheo
-    dtheo <- t(dtheo.IT)
-    # dtheo <- do.call("rbind", lapply(dtheo.IT, t))
-    
-    i_prec = x$jags.data$i_prec
-    ncol_NsurvPred = ncol(dtheo)
-    
-    NsurvPred = matrix(NA, ncol = ncol_NsurvPred, nrow = nrow(dtheo))
-    
-    for(i in 1:nrow(dtheo)){
-      NsurvPred[i, ] = rbinom(ncol_NsurvPred, size = Nprec[i], prob = as.numeric(dtheo[i,] / dtheo[i_prec[i],]))
-    }
-    
-  }
-  
-  QNsurvPred <- t(apply(NsurvPred, 1, quantile,
-                        probs = c(2.5, 50, 97.5) / 100, na.rm = TRUE))
-  
-  tab <- data.frame(QNsurvPred,
-                    Nprec,
-                    NsurvObs,
-                    col = ifelse(QNsurvPred[,"2.5%"] > NsurvObs |
-                                   QNsurvPred[,"97.5%"] < NsurvObs,
-                                 "red", "green"))
-  colnames(tab) <- c("P2.5", "P50", "P97.5", "Nprec", "Obs", "col")
-  
-  return(tab)
-}
+#' Posterior predictive check plot for \code{survFitCstExp} objects
+#'
+#' This is the generic \code{ppc} S3 method for the \code{survFitCstExp} class. It
+#' plots the predicted values along with 95\% credible intervals
+#' versus the observed values for \code{survFit} objects.
+#'
+#' The black points show the observed number of survivors (pooled
+#' replicates, on \eqn{X}-axis) against the corresponding predicted
+#' number (\eqn{Y}-axis). Predictions come along with 95\% prediction
+#' intervals, which are depicted in green when they contain the
+#' observed value and in red otherwise. Samples with equal observed
+#' value are shifted on the \eqn{X}-axis. For that reason, the
+#' bisecting line (y = x) is represented by steps when observed
+#' values are low. That way we ensure green intervals do intersect the
+#' bisecting line.
+#'
+#' @rdname PPC
+#' 
+#' @param x An object of class \code{survFitCstExp}
+#' @param style graphical backend, can be \code{'generic'} or \code{'ggplot'}
+#' @param main main title for the plot
+#' @param \dots Further arguments to be passed to generic methods
+#' 
+#' @return a plot of class \code{ggplot}
+#'
+#' @import ggplot2
+#' @import grDevices
+#' @importFrom graphics plot
+#'
+#' @export
+#' 
+ppc.survFitCstExp <- function(x, style = "ggplot", main = NULL, ...) {
+
+  xlab <- "Observed nb of survivors"
+  ylab <- "Predicted nb of survivors"
+  
+  ppc_gen(EvalsurvTKTDPpc_CstExp(x), style, xlab, ylab, main)
+}
+
+#' @importFrom stats rbinom quantile plogis
+EvalsurvTKTDPpc_CstExp <- function(x) {
+  tot.mcmc <- do.call("rbind", x$mcmc)
+
+  model_type <- x$model_type
+  
+  kd <- 10^(tot.mcmc[, "kd_log10"])
+  # "hb" is not in survFit object of morse <v3.2.0
+  if("hb" %in% colnames(tot.mcmc)){
+    hb <- tot.mcmc[, "hb"]  
+  } else{ hb <- 10^tot.mcmc[, "hb_log10"] }
+
+  if(model_type == "SD"){
+    z <- 10^(tot.mcmc[, "z_log10"])
+    kk <- 10^(tot.mcmc[, "kk_log10"])
+  } else if (model_type == "IT"){
+    alpha <- 10^(tot.mcmc[, "alpha_log10"])
+    beta <- 10^(tot.mcmc[, "beta_log10"])
+  } else{
+    stop("'model_type' must be 'SD' or 'IT'")
+  }
+  
+  #NsurvObs <- x$jags.data$y
+  NsurvObs <- x$jags.data$Nsurv
+  
+  #n <- x$jags.data$ndat
+  n <- x$jags.data$n_data
+  
+  #xconc <- x$jags.data$x
+  xconc <- x$jags.data$conc
+  
+  #t <- x$jags.data$t
+  time <- x$jags.data$time
+  
+  Nprec <- x$jags.data$Nprec
+  
+  if(model_type == "SD"){
+    
+    niter <- nrow(tot.mcmc)
+   
+    tprec <- x$jags.data$tprec
+    
+    NsurvPred <- matrix(NA, nrow = niter, ncol = n)
+    psurv = NULL
+    
+    # bigtime <- x$jags.data$bigtime
+    bigtime <- max(time) + 10
+    
+    for (i in 1:n) {
+      for (j in 1:length(kd)) {
+        xcor <- ifelse(xconc[i] > 0, xconc[i], 10)
+        R <- ifelse(xconc[i] > z[j], z[j]/xcor, 0.1)
+        tz <- ifelse(xconc[i] > z[j], -1 / kd[j] * log(1 - R), bigtime)
+        tref <- max(tprec[i], tz)
+        psurv[j] <- exp(-hb[j] * (time[i] - tprec[i]) +
+                          if (time[i] > tz) {
+                            -kk[j] * ((xconc[i] - z[j]) * (time[i] - tref) +
+                                        xconc[i]/kd[j] * (exp(-kd[j] * time[i]) - exp(-kd[j] * tref)))
+                          } else {
+                            0
+                          })
+      }
+      NsurvPred[, i] <- rbinom(niter, Nprec[i], psurv)
+    }
+    NsurvPred <- t(NsurvPred)
+  }
+  if(model_type == "IT"){
+    
+    D.max <- matrix(nrow = length(kd), ncol = length(xconc) )
+    for(j in 1:n){
+      # xconc[j] * (1 - exp(-kd * time[j])) is always the maximum over all previous times
+      D.max[,j] <- xconc[j] * (1-exp(-kd * time[j])) 
+    }
+    dtheo.IT <-  exp(-hb %*% t(time)) * (1 - plogis(log(D.max), location = log(alpha), scale = 1/beta))
+    
+    # transpose dtheo
+    dtheo <- t(dtheo.IT)
+    # dtheo <- do.call("rbind", lapply(dtheo.IT, t))
+    
+    i_prec = x$jags.data$i_prec
+    ncol_NsurvPred = ncol(dtheo)
+    
+    NsurvPred = matrix(NA, ncol = ncol_NsurvPred, nrow = nrow(dtheo))
+    
+    for(i in 1:nrow(dtheo)){
+      NsurvPred[i, ] = rbinom(ncol_NsurvPred, size = Nprec[i], prob = as.numeric(dtheo[i,] / dtheo[i_prec[i],]))
+    }
+    
+  }
+  
+  QNsurvPred <- t(apply(NsurvPred, 1, quantile,
+                        probs = c(2.5, 50, 97.5) / 100, na.rm = TRUE))
+  
+  tab <- data.frame(QNsurvPred,
+                    Nprec,
+                    NsurvObs,
+                    col = ifelse(QNsurvPred[,"2.5%"] > NsurvObs |
+                                   QNsurvPred[,"97.5%"] < NsurvObs,
+                                 "red", "green"))
+  colnames(tab) <- c("P2.5", "P50", "P97.5", "Nprec", "Obs", "col")
+  
+  return(tab)
+}
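# A minimal sketch of the IT survival computation performed in
# EvalsurvTKTDPpc_CstExp() above, for a single posterior draw under constant
# exposure; all parameter values are illustrative.
kd    <- 0.8              # dominant toxicokinetic rate constant
hb    <- 0.02             # background hazard rate
alpha <- 3; beta <- 4     # log-logistic threshold distribution
conc  <- 5
time  <- c(1, 2, 3, 4)

# under constant exposure, scaled damage is maximal at the current time
D.max <- conc * (1 - exp(-kd * time))

# survival = background survival x P(individual threshold > max damage)
psurv <- exp(-hb * time) *
  (1 - plogis(log(D.max), location = log(alpha), scale = 1 / beta))
psurv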
diff --git a/R/ppc.survFitTKTD.R b/R/ppc.survFitTKTD.R
index 30e506ddb0627267a6584e6d11d57e8894108138..dddaefe59fad390d77f36c06ad77c7d4d382b437 100644
--- a/R/ppc.survFitTKTD.R
+++ b/R/ppc.survFitTKTD.R
@@ -1,104 +1,88 @@
-#' Posterior predictive check plot for \code{survFitTKTD} objects
-#'
-#' This is the generic \code{ppc} S3 method for the \code{survFitTKTD} class. It
-#' plots the predicted values along with 95\% credible intervals
-#' versus the observed values for \code{survFitTKTD} objects.
-#' 
-#' The black points show the observed number of survivors (pooled
-#' replicates, on \eqn{X}-axis) against the corresponding predicted
-#' number (\eqn{Y}-axis). Predictions come along with 95\% prediction
-#' intervals, which are depicted in green when they contain the
-#' observed value and in red otherwise. Samples with equal observed
-#' value are shifted on the \eqn{X}-axis. For that reason, the
-#' bisecting line (y = x), is represented by steps when observed
-#' values are low. That way we ensure green intervals do intersect the
-#' bisecting line.
-#' 
-#' @rdname PPC
-#'
-#' @param x An object of class \code{survFitTKTD}
-#' @param style graphical backend, can be \code{'generic'} or \code{'ggplot'}
-#' @param main main title for the plot
-#' @param \dots Further arguments to be passed to generic methods
-#' 
-#' @return a plot of class \code{ggplot}
-#'
-#' @examples
-#'
-#' # (1) Load the data
-#' data(propiconazole)
-#'
-#' # (2) Create an object of class "survData"
-#' dat <- survData(propiconazole)
-#'
-#' \donttest{
-#' # (3) Run the survFitTKTD function with the TKTD model ('SD' only)
-#' out <- survFitTKTD(dat)
-#'
-#' # (4) Plot observed versus predicted values
-#' ppc(out)
-#' }
-#'
-#' @import ggplot2
-#' @import grDevices
-#' @importFrom graphics plot
-#' 
-#' @export
-ppc.survFitTKTD <- function(x, style = "ggplot", main = NULL,...) {
-  if (!is(x, "survFitTKTD"))
-    stop("x is not of class 'survFitTKTD'!")
-  
-  xlab <- "Observed nb of survivors"
-  ylab <- "Predicted nb of survivors"
-  
-  ppc_gen(EvalsurvTKTDPpc(x), style, xlab, ylab, main)
-}
-
-#' @importFrom stats rbinom quantile
-EvalsurvTKTDPpc <- function(x) {
-  tot.mcmc <- do.call("rbind", x$mcmc)
-
-  kd <- 10^(tot.mcmc[, "log10kd"])
-  ks <- 10^(tot.mcmc[, "log10ks"])
-  nec <- 10^(tot.mcmc[, "log10NEC"])
-  m0 <- 10^(tot.mcmc[, "log10m0"])
-  
-  niter <- nrow(tot.mcmc)
-  n <- x$jags.data$ndat
-  xconc <- x$jags.data$x
-  t <- x$jags.data$t
-  tprec <- x$jags.data$tprec
-  NsurvObs <- x$jags.data$y
-  Nprec <- x$jags.data$Nprec
-  bigtime <- x$jags.data$bigtime
-  NsurvPred <- matrix(NA, nrow = niter, ncol = n)
-  psurv = NULL
-  for (i in 1:n) {
-    for (j in 1:length(kd)) {
-      xcor <- ifelse(xconc[i] > 0, xconc[i], 10)
-      R <- ifelse(xconc[i] > nec[j], nec[j]/xcor, 0.1)
-      tNEC <- ifelse(xconc[i] > nec[j], -1 / kd[j] * log(1 - R), bigtime)
-      tref <- max(tprec[i], tNEC)
-      psurv[j] <- exp(-m0[j] * (t[i] - tprec[i]) +
-                        if (t[i] > tNEC) {
-                          -ks[j] * ((xconc[i] - nec[j]) * (t[i] - tref) +
-                                      xconc[i]/kd[j] * (exp(-kd[j] * t[i]) - exp(-kd[j] * tref)))
-                        } else {
-                          0
-                        })
-    }
-    NsurvPred[, i] <- rbinom(niter, Nprec[i], psurv)
-  }
-
-  QNsurvPred <- t(apply(NsurvPred, 2, quantile,
-                        probs = c(2.5, 50, 97.5) / 100, na.rm = TRUE))
-  tab <- data.frame(QNsurvPred,
-                    Nprec, NsurvObs,
-                    col = ifelse(QNsurvPred[,"2.5%"] > NsurvObs |
-                                   QNsurvPred[,"97.5%"] < NsurvObs,
-                                 "red", "green"))
-  colnames(tab) <- c("P2.5", "P50", "P97.5", "Nprec", "Obs", "col")
-  
-  return(tab)
-}
-
+#' Posterior predictive check plot for \code{survFitTKTD} objects
+#'
+#' This is the \code{ppc} S3 method for the \code{survFitTKTD} class. It
+#' plots the predicted values along with 95\% credible intervals
+#' versus the observed values for \code{survFitTKTD} objects.
+#' 
+#' The black points show the observed number of survivors (pooled
+#' replicates, on \eqn{X}-axis) against the corresponding predicted
+#' number (\eqn{Y}-axis). Predictions come along with 95\% prediction
+#' intervals, which are depicted in green when they contain the
+#' observed value and in red otherwise. Samples with equal observed
+#' values are shifted along the \eqn{X}-axis. For that reason, the
+#' bisecting line (\eqn{y = x}) is drawn as steps when observed
+#' values are low, which ensures that green intervals visibly
+#' intersect it.
+#' 
+#' @rdname PPC
+#'
+#' @param x An object of class \code{survFitTKTD}
+#' @param style graphical backend, can be \code{'generic'} or \code{'ggplot'}
+#' @param main main title for the plot
+#' @param \dots Further arguments to be passed to generic methods
+#' 
+#' @return a plot of class \code{ggplot}
+#'
+#' @import ggplot2
+#' @import grDevices
+#' @importFrom graphics plot
+#' 
+#' @export
+ppc.survFitTKTD <- function(x, style = "ggplot", main = NULL, ...) {
+  if (!is(x, "survFitTKTD"))
+    stop("x is not of class 'survFitTKTD'!")
+  
+  xlab <- "Observed nb of survivors"
+  ylab <- "Predicted nb of survivors"
+  
+  ppc_gen(EvalsurvTKTDPpc(x), style, xlab, ylab, main)
+}
+
+#' @importFrom stats rbinom quantile
+EvalsurvTKTDPpc <- function(x) {
+  tot.mcmc <- do.call("rbind", x$mcmc)
+
+  kd <- 10^(tot.mcmc[, "log10kd"])
+  ks <- 10^(tot.mcmc[, "log10ks"])
+  nec <- 10^(tot.mcmc[, "log10NEC"])
+  m0 <- 10^(tot.mcmc[, "log10m0"])
+  
+  niter <- nrow(tot.mcmc)
+  n <- x$jags.data$ndat
+  xconc <- x$jags.data$x
+  t <- x$jags.data$t
+  tprec <- x$jags.data$tprec
+  NsurvObs <- x$jags.data$y
+  Nprec <- x$jags.data$Nprec
+  bigtime <- x$jags.data$bigtime
+  NsurvPred <- matrix(NA, nrow = niter, ncol = n)
+  psurv <- numeric(length(kd))
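+  # For constant exposure, survival from tprec to t under the SD model is
+  # S(t)/S(tprec) = exp(-m0*(t - tprec)
+  #                     - ks*((C - NEC)*(t - tref) + C/kd*(exp(-kd*t) - exp(-kd*tref))))
+  # for t > tNEC, with tref = max(tprec, tNEC) and tNEC the time at which the
+  # damage first exceeds NEC; before tNEC only background mortality m0 acts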
+  for (i in 1:n) {
+    for (j in 1:length(kd)) {
+      xcor <- ifelse(xconc[i] > 0, xconc[i], 10)
+      R <- ifelse(xconc[i] > nec[j], nec[j]/xcor, 0.1)
+      tNEC <- ifelse(xconc[i] > nec[j], -1 / kd[j] * log(1 - R), bigtime)
+      tref <- max(tprec[i], tNEC)
+      psurv[j] <- exp(-m0[j] * (t[i] - tprec[i]) +
+                        if (t[i] > tNEC) {
+                          -ks[j] * ((xconc[i] - nec[j]) * (t[i] - tref) +
+                                      xconc[i]/kd[j] * (exp(-kd[j] * t[i]) - exp(-kd[j] * tref)))
+                        } else {
+                          0
+                        })
+    }
+    NsurvPred[, i] <- rbinom(niter, Nprec[i], psurv)
+  }
+
+  QNsurvPred <- t(apply(NsurvPred, 2, quantile,
+                        probs = c(2.5, 50, 97.5) / 100, na.rm = TRUE))
+  tab <- data.frame(QNsurvPred,
+                    Nprec, NsurvObs,
+                    col = ifelse(QNsurvPred[,"2.5%"] > NsurvObs |
+                                   QNsurvPred[,"97.5%"] < NsurvObs,
+                                 "red", "green"))
+  colnames(tab) <- c("P2.5", "P50", "P97.5", "Nprec", "Obs", "col")
+  
+  return(tab)
+}
+
diff --git a/R/ppc.survFitTT.R b/R/ppc.survFitTT.R
index 49917d0429be0ac34956f926da33522e987280ab..714bc2acc0ac9a09d720690e4ba6af47fc6aec27 100644
--- a/R/ppc.survFitTT.R
+++ b/R/ppc.survFitTT.R
@@ -1,187 +1,170 @@
-#' Posterior predictive check plot for \code{survFitTT} objects
-#'
-#' This is the generic \code{ppc} S3 method for the \code{survFitTT} class. It
-#' plots the predicted values with 95 \% credible intervals versus the observed
-#' values for \code{survFitTT} objects.
-#' 
-#' The coordinates of black points are the observed values of the number of survivors
-#' (pooled replicates) for a given concentration (\eqn{X}-axis) and the corresponding 
-#' predicted values (\eqn{Y}-axis). 95\% prediction intervals are added to each predicted
-#' value, colored in green if this interval contains the observed value and in red
-#' otherwise.
-#' The bisecting line (y = x) is added to the plot in order to see if each
-#' prediction interval contains each observed value. As replicates are shifted
-#' on the x-axis, this line is represented by steps.
-#'
-#' @rdname PPC
-#'
-#' @param x An object of class \code{survFitTT}
-#' @param style graphical backend, can be \code{'generic'} or \code{'ggplot'}
-#' @param main main title for the plot
-#' @param \dots Further arguments to be passed to generic methods
-#' 
-#' @return a plot of class \code{ggplot}
-#'
-#' @examples
-#'
-#' # (1) Load the data
-#' data(cadmium1)
-#'
-#' # (2) Create an object of class "survData"
-#' dat <- survData(cadmium1)
-#'
-#' \donttest{
-#' # (3) Run the survFitTT function with the log-logistic binomial model
-#' out <- survFitTT(dat, lcx = c(5, 10, 15, 20, 30, 50, 80),
-#' quiet = TRUE)
-#'
-#' # (4) Plot observed versus predicted values
-#' ppc(out)
-#' }
-#'
-#' @import ggplot2
-#' @import grDevices
-#' @importFrom graphics plot
-#' 
-#' @export
-ppc.survFitTT <- function(x, style = "ggplot", main = NULL, ...) {
-  if (!is(x, "survFitTT"))
-    stop("x is not of class 'survFitTT'!")
-  
-  xlab <- "Observed nb of survivors"
-  ylab <- "Predicted nb of survivors"
-  
-  ppc_gen(EvalsurvPpc(x), style, xlab, ylab, main)
-}
-
-ppc_gen <- function(tab, style, xlab, ylab, main) {
-  
-  if (style == "generic") PpcGeneric(tab, xlab, ylab, main)
-  else if (style == "ggplot") PpcGG(tab, xlab, ylab, main)
-  else stop("Unknown style")
-}
-
-#' @importFrom stats rbinom quantile
-EvalsurvPpc <- function(x) {
-  tot.mcmc <- do.call("rbind", x$mcmc)
-  
-  if (x$det.part == "loglogisticbinom_3") {
-    d <- tot.mcmc[, "d"]
-  }
-  
-  b <- 10^tot.mcmc[, "log10b"]
-  e <- 10^tot.mcmc[, "log10e"]
-  
-  niter <- nrow(tot.mcmc)
-  n <- x$jags.data$n
-  xconc <- x$jags.data$xconc
-  Ninit <- x$jags.data$Ninit
-  NsurvObs <- x$jags.data$Nsurv
-  NsurvPred <- matrix(NA, nrow = niter, ncol = n)
-  
-  if (x$det.part == "loglogisticbinom_2") {
-    for (i in 1:n) {
-      p <- 1 / (1 + (xconc[i]/e)^b)
-      NsurvPred[, i] <- rbinom(niter, Ninit[i], p)
-    }
-  }
-  if (x$det.part == "loglogisticbinom_3") {
-    for (i in 1:n) {
-      p <- d / (1 + (xconc[i]/e)^b)
-      NsurvPred[, i] <- rbinom(niter, Ninit[i], p)
-    }
-  }
-  QNsurvPred <- t(apply(NsurvPred, 2, quantile,
-                        probs = c(2.5, 50, 97.5) / 100))
-  tab <- data.frame(QNsurvPred,
-                    Ninit, NsurvObs,
-                    col = ifelse(QNsurvPred[,"2.5%"] > NsurvObs |
-                                   QNsurvPred[,"97.5%"] < NsurvObs,
-                                 "red", "green"))
-  colnames(tab) <- c("P2.5", "P50", "P97.5", "Ninit", "Obs", "col")
-  
-  return(tab)
-}
-
-#' @importFrom graphics abline segments
-PpcGeneric <- function(tab, xlab, ylab, main) {
-  obs_val <- unique(tab[, "Obs"])
-  sObs <- stepCalc(obs_val)$sObs
-  stepX <- stepCalc(obs_val)$stepX
-  jittered_obs <- jitterObsGenerator(stepX, tab, obs_val, ppc = TRUE)$jitterObs
-  spaceX <- jitterObsGenerator(stepX, tab, obs_val, ppc = TRUE)$spaceX
-  
-  plot(c(0, max(tab[, "P97.5"])),
-       c(0, max(tab[, "P97.5"])),
-       type = "n",
-       xlab = xlab,
-       ylab = ylab,
-       main = main,
-       xaxt = "n",
-       yaxt = "n")
-  
-  # axis
-  axis(side = 2, at = if (max(tab[, "Obs"]) == 1) {
-    c(0, 1)
-  } else {
-    pretty(c(0, max(tab[, "P97.5"])))
-  })
-  axis(side = 1, at = if (max(tab[, "Obs"]) == 1) {
-    c(0, 1)
-  } else {
-    pretty(c(0, max(tab[, "P97.5"])))
-  })
-  
-  if (max(sObs) < 20) {
-    sapply(1:length(sObs), function(i) {
-      segments(sObs[i] - (spaceX * 1.25), sObs[i],
-               sObs[i] + (spaceX * 1.25), sObs[i])
-    })
-  } else {
-    abline(0, 1)
-  }
-  
-  tab0 <- tab[order(tab$Obs),]
-  segments(jittered_obs, tab0[, "P2.5"],
-           jittered_obs, tab0[, "P97.5"],
-           col = as.character(tab0[, "col"]))
-  
-  points(jittered_obs, tab0[, "P50"],
-         pch = 20)
-}
-
-#' @import ggplot2
-#' @importFrom  grid arrow unit
-PpcGG <- function(tab, xlab, ylab, main) {
-  obs_val <- unique(tab[, "Obs"])
-  sObs <- stepCalc(obs_val)$sObs
-  stepX <- stepCalc(obs_val)$stepX
-  jittered_obs <- jitterObsGenerator(stepX, tab, obs_val, ppc = TRUE)$jitterObs
-  spaceX <- jitterObsGenerator(stepX, tab, obs_val, ppc = TRUE)$spaceX
-  
-  tab0 <- cbind(tab[order(tab$Obs),], jittered_obs)
-  
-  df <- data.frame(sObs, spaceX)
-
-  if (max(sObs) < 20) {
-    gf1 <- ggplot(df) +
-      geom_segment(aes(x = sObs - (spaceX * 1.25),
-                       xend = sObs + (spaceX * 1.25),
-                       y = sObs, yend = sObs))
-  } else {
-    gf1 <- ggplot(tab0) +
-      geom_abline(intercept = 0, slope = 1)
-  }
-  
-  gf2 <- gf1 +
-    geom_segment(aes(x = jittered_obs, xend = jittered_obs,
-                     y = P2.5, yend = P97.5), data = tab0,
-                 color = tab0$col) +
-    geom_point(aes(x = jittered_obs, y = P50), tab0) +
-    expand_limits(y = 0) +
-    expand_limits(x = 0) +
-    labs(x = xlab, y = ylab, title = main) +
-    theme_minimal()
-  
-  return(gf2)
-}
+#' Posterior predictive check plot for \code{survFitTT} objects
+#'
+#' This is the \code{ppc} S3 method for the \code{survFitTT} class. It
+#' plots the predicted values with 95\% credible intervals versus the observed
+#' values for \code{survFitTT} objects.
+#' 
+#' The coordinates of black points are the observed values of the number of survivors
+#' (pooled replicates) for a given concentration (\eqn{X}-axis) and the corresponding 
+#' predicted values (\eqn{Y}-axis). 95\% prediction intervals are added to each predicted
+#' value, colored in green if this interval contains the observed value and in red
+#' otherwise.
+#' The bisecting line (\eqn{y = x}) is added to the plot to show whether each
+#' prediction interval contains the corresponding observed value. As replicates
+#' are shifted along the \eqn{X}-axis, this line is drawn as steps.
+#'
+#' @rdname PPC
+#'
+#' @param x An object of class \code{survFitTT}
+#' @param style graphical backend, can be \code{'generic'} or \code{'ggplot'}
+#' @param main main title for the plot
+#' @param \dots Further arguments to be passed to generic methods
+#' 
+#' @return a plot of class \code{ggplot}
+#'
+#' @import ggplot2
+#' @import grDevices
+#' @importFrom graphics plot
+#' 
+#' @export
+ppc.survFitTT <- function(x, style = "ggplot", main = NULL, ...) {
+  if (!is(x, "survFitTT"))
+    stop("x is not of class 'survFitTT'!")
+  
+  xlab <- "Observed nb of survivors"
+  ylab <- "Predicted nb of survivors"
+  
+  ppc_gen(EvalsurvPpc(x), style, xlab, ylab, main)
+}
+
+ppc_gen <- function(tab, style, xlab, ylab, main) {
+  
+  if (style == "generic") PpcGeneric(tab, xlab, ylab, main)
+  else if (style == "ggplot") PpcGG(tab, xlab, ylab, main)
+  else stop("Unknown style")
+}
+
+#' @importFrom stats rbinom quantile
+EvalsurvPpc <- function(x) {
+  tot.mcmc <- do.call("rbind", x$mcmc)
+  
+  if (x$det.part == "loglogisticbinom_3") {
+    d <- tot.mcmc[, "d"]
+  }
+  
+  b <- 10^tot.mcmc[, "log10b"]
+  e <- 10^tot.mcmc[, "log10e"]
+  
+  niter <- nrow(tot.mcmc)
+  n <- x$jags.data$n
+  xconc <- x$jags.data$xconc
+  Ninit <- x$jags.data$Ninit
+  NsurvObs <- x$jags.data$Nsurv
+  NsurvPred <- matrix(NA, nrow = niter, ncol = n)
+  
+  if (x$det.part == "loglogisticbinom_2") {
+    for (i in 1:n) {
+      p <- 1 / (1 + (xconc[i]/e)^b)
+      NsurvPred[, i] <- rbinom(niter, Ninit[i], p)
+    }
+  }
+  if (x$det.part == "loglogisticbinom_3") {
+    for (i in 1:n) {
+      p <- d / (1 + (xconc[i]/e)^b)
+      NsurvPred[, i] <- rbinom(niter, Ninit[i], p)
+    }
+  }
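+  # A minimal numeric sketch of the log-logistic dose-response above
+  # (hypothetical values): with e = 10 (the LC50) and b = 2, concentration 5
+  # gives p = 1 / (1 + (5/10)^2) = 0.8, i.e. an expected survival of 80%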
+  QNsurvPred <- t(apply(NsurvPred, 2, quantile,
+                        probs = c(2.5, 50, 97.5) / 100))
+  tab <- data.frame(QNsurvPred,
+                    Ninit, NsurvObs,
+                    col = ifelse(QNsurvPred[,"2.5%"] > NsurvObs |
+                                   QNsurvPred[,"97.5%"] < NsurvObs,
+                                 "red", "green"))
+  colnames(tab) <- c("P2.5", "P50", "P97.5", "Ninit", "Obs", "col")
+  
+  return(tab)
+}
+
+#' @importFrom graphics abline segments
+PpcGeneric <- function(tab, xlab, ylab, main) {
+  obs_val <- unique(tab[, "Obs"])
+  steps <- stepCalc(obs_val)
+  sObs <- steps$sObs
+  stepX <- steps$stepX
+  jitter_obs <- jitterObsGenerator(stepX, tab, obs_val, ppc = TRUE)
+  jittered_obs <- jitter_obs$jitterObs
+  spaceX <- jitter_obs$spaceX
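+  # jittering spreads tied observed values along the X-axis so that
+  # overlapping prediction intervals remain distinguishable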
+  
+  plot(c(0, max(tab[, "P97.5"])),
+       c(0, max(tab[, "P97.5"])),
+       type = "n",
+       xlab = xlab,
+       ylab = ylab,
+       main = main,
+       xaxt = "n",
+       yaxt = "n")
+  
+  # axis
+  axis(side = 2, at = if (max(tab[, "Obs"]) == 1) {
+    c(0, 1)
+  } else {
+    pretty(c(0, max(tab[, "P97.5"])))
+  })
+  axis(side = 1, at = if (max(tab[, "Obs"]) == 1) {
+    c(0, 1)
+  } else {
+    pretty(c(0, max(tab[, "P97.5"])))
+  })
+  
+  if (max(sObs) < 20) {
+    sapply(1:length(sObs), function(i) {
+      segments(sObs[i] - (spaceX * 1.25), sObs[i],
+               sObs[i] + (spaceX * 1.25), sObs[i])
+    })
+  } else {
+    abline(0, 1)
+  }
+  
+  tab0 <- tab[order(tab$Obs),]
+  segments(jittered_obs, tab0[, "P2.5"],
+           jittered_obs, tab0[, "P97.5"],
+           col = as.character(tab0[, "col"]))
+  
+  points(jittered_obs, tab0[, "P50"],
+         pch = 20)
+}
+
+#' @import ggplot2
+#' @importFrom grid arrow unit
+PpcGG <- function(tab, xlab, ylab, main) {
+  obs_val <- unique(tab[, "Obs"])
+  steps <- stepCalc(obs_val)
+  sObs <- steps$sObs
+  stepX <- steps$stepX
+  jitter_obs <- jitterObsGenerator(stepX, tab, obs_val, ppc = TRUE)
+  jittered_obs <- jitter_obs$jitterObs
+  spaceX <- jitter_obs$spaceX
+  
+  tab0 <- cbind(tab[order(tab$Obs),], jittered_obs)
+  
+  df <- data.frame(sObs, spaceX)
+
+  if (max(sObs) < 20) {
+    gf1 <- ggplot(df) +
+      geom_segment(aes(x = sObs - (spaceX * 1.25),
+                       xend = sObs + (spaceX * 1.25),
+                       y = sObs, yend = sObs))
+  } else {
+    gf1 <- ggplot(tab0) +
+      geom_abline(intercept = 0, slope = 1)
+  }
+  
+  gf2 <- gf1 +
+    geom_segment(aes(x = jittered_obs, xend = jittered_obs,
+                     y = P2.5, yend = P97.5), data = tab0,
+                 color = tab0$col) +
+    geom_point(aes(x = jittered_obs, y = P50), tab0) +
+    expand_limits(y = 0) +
+    expand_limits(x = 0) +
+    labs(x = xlab, y = ylab, title = main) +
+    theme_minimal()
+  
+  return(gf2)
+}
diff --git a/R/ppc.survFitVarExp.R b/R/ppc.survFitVarExp.R
index 1fa425e99ba4d5d406a7a541b6eb7575625a7218..6f27b59464adf0f6ce726bc8d8231285121a9cd4 100644
--- a/R/ppc.survFitVarExp.R
+++ b/R/ppc.survFitVarExp.R
@@ -1,101 +1,85 @@
-#' Posterior predictive check plot for \code{survFitVarExp} objects
-#'
-#' This is the generic \code{ppc} S3 method for the \code{survFitVarExp} class. It
-#' plots the predicted values along with 95\% credible intervals
-#' versus the observed values for \code{survFit} objects.
-#'
-#' The black points show the observed number of survivors (on \eqn{X}-axis)
-#'  against the corresponding predicted
-#' number (\eqn{Y}-axis). Predictions come along with 95\% prediction
-#' intervals, which are depicted in green when they contain the
-#' observed value and in red otherwise.
-#'
-#' @rdname PPC
-#'
-#' @param x An object of class \code{survFitVarExp}
-#' @param xlab A label for the \eqn{X}-axis, by default \code{Observed nb of survivors}.
-#' @param ylab A label for the \eqn{Y}-axis, by default \code{Predicted nb of survivors}.
-#' @param main A main title for the plot.
-#' @param \dots Further arguments to be passed to generic methods
-#' 
-#' @return a plot of class \code{ggplot}
-#'
-#' @examples
-#'
-#' # (1) Load the data
-#' data(propiconazole_pulse_exposure)
-#'
-#' # (2) Create an object of class "survData"
-#' dat <- survData(propiconazole_pulse_exposure)
-#'
-#' \donttest{
-#' # (3) Run the survFitTKTD function with the TKTD model ('SD' or 'IT')
-#' out <- survFit(dat, model_type = "SD")
-#'
-#' # (4) Plot observed versus predicted values
-#' ppc(out)
-#' }
-#'
-#' @export
-
-ppc.survFitVarExp <- function(x,
-                              xlab = "Observed nb of survivors",
-                              ylab = "Predicted nb of survivors",
-                              main = NULL, ...) {
-  
-  
-  ### compute posteriors median and 95 CI
-  jags.data <- x$jags.data
-  df_ppc <- posteriorData(x)$df_ppc
-  
-  df_plt <- tibble(Nsurv = jags.data$Nsurv,
-                      time = jags.data$time,
-                      replicate = jags.data$replicate,
-                      Nsurv_q50 = apply(df_ppc, 2, quantile, probs = 0.5, na.rm = TRUE),
-                      Nsurv_qinf95 = apply(df_ppc, 2, quantile, probs = 0.025, na.rm = TRUE),
-                      Nsurv_qsup95 = apply(df_ppc, 2, quantile, probs = 0.975, na.rm = TRUE)) %>%
-    mutate(col_range = ifelse(Nsurv_qinf95 > Nsurv | Nsurv_qsup95 < Nsurv, "out", "in"))
-  
-  
-  ppc_plt <- df_plt %>%
-    ggplot() + theme_bw() +
-    theme(legend.position="none") +
-    # expand_limits(x = 0, y = 0) +
-    scale_colour_manual(values = c("green", "red")) +
-    scale_x_continuous(name = xlab) +
-    scale_y_continuous(name = ylab) +
-    geom_abline(slope = 1) +
-    geom_linerange(aes(x = Nsurv,
-                       ymin = Nsurv_qinf95,
-                       ymax = Nsurv_qsup95 ,
-                       group = replicate,
-                       color = col_range),
-                   position = position_dodge(width=0.5)) +
-    geom_point(aes(x = Nsurv,
-                   y = Nsurv_q50,
-                   group = replicate ),
-               position = position_dodge(width=0.5))
-  
-  return(ppc_plt)
-}
-
-
-# @param x An object of class \code{survFitVarExp}
-
-posteriorData <- function(x){
-  
-  model_type = x$model_type
-  mcmc = x$mcmc
-  
-  mctot <- do.call("rbind", mcmc)
-  
-  df_mctot = as_tibble(mctot)
-  
-  df_psurv = select(df_mctot, contains("psurv"))
-  df_ppc = select(df_mctot, contains("Nsurv_ppc"))
-  
-  ls_posteriorData = list( df_psurv = df_psurv,
-                           df_ppc = df_ppc)
-  
-  return(ls_posteriorData)
-}
+#' Posterior predictive check plot for \code{survFitVarExp} objects
+#'
+#' This is the \code{ppc} S3 method for the \code{survFitVarExp} class. It
+#' plots the predicted values along with 95\% credible intervals
+#' versus the observed values for \code{survFitVarExp} objects.
+#'
+#' The black points show the observed number of survivors (on \eqn{X}-axis)
+#' against the corresponding predicted
+#' number (\eqn{Y}-axis). Predictions come along with 95\% prediction
+#' intervals, which are depicted in green when they contain the
+#' observed value and in red otherwise.
+#'
+#' @rdname PPC
+#'
+#' @param x An object of class \code{survFitVarExp}
+#' @param xlab A label for the \eqn{X}-axis, by default \code{Observed nb of survivors}.
+#' @param ylab A label for the \eqn{Y}-axis, by default \code{Predicted nb of survivors}.
+#' @param main A main title for the plot.
+#' @param \dots Further arguments to be passed to generic methods
+#' 
+#' @return a plot of class \code{ggplot}
+#'
+#' @export
+#' 
+ppc.survFitVarExp <- function(x,
+                              xlab = "Observed nb of survivors",
+                              ylab = "Predicted nb of survivors",
+                              main = NULL, ...) {
+  
+  
+  ### compute posterior medians and 95% credible intervals
+  jags.data <- x$jags.data
+  df_ppc <- posteriorData(x)$df_ppc
+  
+  df_plt <- tibble(Nsurv = jags.data$Nsurv,
+                   time = jags.data$time,
+                   replicate = jags.data$replicate,
+                   Nsurv_q50 = apply(df_ppc, 2, quantile, probs = 0.5, na.rm = TRUE),
+                   Nsurv_qinf95 = apply(df_ppc, 2, quantile, probs = 0.025, na.rm = TRUE),
+                   Nsurv_qsup95 = apply(df_ppc, 2, quantile, probs = 0.975, na.rm = TRUE)) %>%
+    mutate(col_range = ifelse(Nsurv_qinf95 > Nsurv | Nsurv_qsup95 < Nsurv, "out", "in"))
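+  # "out" flags observations falling outside their 95% prediction interval;
+  # scale_colour_manual() below maps "in" -> green and "out" -> red
+  # (alphabetical level order)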
+  
+  
+  ppc_plt <- df_plt %>%
+    ggplot() + theme_bw() +
+    theme(legend.position = "none") +
+    scale_colour_manual(values = c("green", "red")) +
+    scale_x_continuous(name = xlab) +
+    scale_y_continuous(name = ylab) +
+    geom_abline(slope = 1) +
+    geom_linerange(aes(x = Nsurv,
+                       ymin = Nsurv_qinf95,
+                       ymax = Nsurv_qsup95,
+                       group = replicate,
+                       color = col_range),
+                   position = position_dodge(width = 0.5)) +
+    geom_point(aes(x = Nsurv,
+                   y = Nsurv_q50,
+                   group = replicate),
+               position = position_dodge(width = 0.5)) +
+    labs(title = main) # honour the documented 'main' argument
+  
+  return(ppc_plt)
+}
+
+
+# @param x An object of class \code{survFitVarExp}
+
+posteriorData <- function(x){
+  
+  mctot <- do.call("rbind", x$mcmc)
+  
+  df_mctot <- as_tibble(mctot)
+  
+  df_psurv <- select(df_mctot, contains("psurv"))
+  df_ppc <- select(df_mctot, contains("Nsurv_ppc"))
+  
+  ls_posteriorData <- list(df_psurv = df_psurv,
+                           df_ppc = df_ppc)
+  
+  return(ls_posteriorData)
+}
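+
+# A minimal sketch of the column selection above (hypothetical column names,
+# following the usual JAGS indexing convention):
+#   df <- tibble::tibble(`psurv[1]` = 0.9, `Nsurv_ppc[1]` = 18, deviance = 42)
+#   select(df, contains("psurv"))      # keeps psurv[1]
+#   select(df, contains("Nsurv_ppc"))  # keeps Nsurv_ppc[1]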
diff --git a/R/predict.survFit.R b/R/predict.survFit.R
index 75ecd7fc518e998e11ff55e80b4168b4b663a9db..0f5cafe7b68c92e1ebe45156df84653e2543b14c 100644
--- a/R/predict.survFit.R
+++ b/R/predict.survFit.R
@@ -24,29 +24,6 @@
 #' @return a \code{list} of \code{data.frame} with the quantiles of outputs in
 #' \code{df_quantiles} or all the MCMC chaines \code{df_spaghetti}
 #' 
-#' @examples 
-#'
-#' # (1) Load the survival data
-#' data("propiconazole_pulse_exposure")
-#'
-#' # (2) Create an object of class "survData"
-#' dataset <- survData(propiconazole_pulse_exposure)
-#'
-#' \donttest{
-#' # (3) Run the survFit function
-#' out <- survFit(dataset , model_type = "SD")
-#'
-#' # (4) Create a new data table for prediction
-#' data_4prediction <- data.frame(time = 1:10,
-#'                                conc = c(0,5,30,30,0,0,5,30,15,0),
-#'                                replicate= rep("predict", 10))
-#'
-#' # (5) Predict on a new dataset
-#' predict_out <- predict(object = out, data_predict = data_4prediction, spaghetti = TRUE)
-#'
-#' }
-#' 
-#' 
 #' @export
 #'
 predict.survFit <- function(object,
diff --git a/R/predict_Nsurv.R b/R/predict_Nsurv.R
index 4485c517a9993571595063a4753f19a467b7462a..c15cb5d147020377b8115cf3771d22a8bd25ea8e 100644
--- a/R/predict_Nsurv.R
+++ b/R/predict_Nsurv.R
@@ -1,326 +1,302 @@
-#' \code{Predict_Nsurv} method for \code{survFit} objects
-#' 
-#' It provides the simulated number of survivors for "SD" or "IT" models under
-#' constant or time-variable exposure.
-#' 
-#' @rdname predict
-#' 
-#' @param object an object used to select a method
-#' @param \dots Further arguments to be passed to generic methods
-#' 
-#' @return an object of class \code{predict_Nsurv}.
-#' 
-#' @export
-predict_Nsurv <- function(object, ...){
-  UseMethod("predict_Nsurv")
-}
-
-#' \code{Predict_Nsurv} method for \code{survFit} objects
-#'
-#' It provides the simulated number of survivors for "SD" or "IT" models under constant or
-#' time-variable exposure.
-#' 
-#' @rdname predict
-#'
-#' @param object An object of class \code{survFit}
-#' @param data_predict A dataframe with four columns \code{time}, \code{conc}, \code{replicate},
-#' and \code{Nsurv}  used for prediction. If \code{NULL}, prediction is based on an object of 
-#'  class \code{survFit} used for fitting.
-#' @param spaghetti If \code{TRUE}, return a set of survival curves using
-#' parameters drawn from the posterior distribution.
-#' @param mcmc_size Can be used to reduce the number of mcmc samples in order to speed up
-#'  the computation.
-#' @param hb_value If \code{TRUE}, the background mortality \code{hb} is taken into account from the posterior.
-#' If \code{FALSE}, parameter \code{hb} is set to 0. The default is \code{TRUE}.
-#' @param  hb_valueFORCED If \code{hb_value} is \code{FALSE}, it fix \code{hb}.
-#' @param \dots Further arguments to be passed to generic methods
-#' 
-#' 
-#' @return The function returns an object of class \code{survFitPredict_Nsurv}, which is
-#' a list with the two following \code{data.frame}:
-#' \item{df_quantile}{A \code{data.frame} with 10 columns, \code{time}, \code{conc},
-#' \code{replicate}, \code{Nsurv} (observed number of survivors)
-#'  and other columns with median and 95\% credible interval
-#' of the number of survivors computed with 2 different way 
-#' refers as \code{check} and \code{valid}:  
-#' \code{Nsurv_q50_check}, \code{Nsurv_qinf95_check},
-#' \code{Nsurv_qsup95_check}, \code{Nsurv_q50_valid}, \code{Nsurv_qinf95_valid},
-#' \code{Nsurv_qsup95_valid}. The \code{_check} refers to the number of survivors
-#' at time \eqn{t} predicted using the observed number
-#' of survivors at time \eqn{t-1},
-#' while the \code{_valid} refers to the number of survivors predicted at time
-#' \eqn{t} based on the predicted number of survivors at time \eqn{t-1}.}
-#' \item{df_spaghetti}{NULL if arguement \code{spaghetti = FALSE}. With \code{spaghetti = TRUE}, it returns a
-#' dataframe with all simulations based on MCMC parameters from a \code{survFit} object.}
-#' 
-#' 
-#' @examples 
-#'
-#' # (1) Load the survival data
-#' data("propiconazole_pulse_exposure")
-#'
-#' # (2) Create an object of class "survData"
-#' dataset <- survData(propiconazole_pulse_exposure)
-#'
-#' \donttest{
-#' # (3) Run the survFit function
-#' out <- survFit(dataset , model_type = "SD")
-#'
-#' # (4) Create a new data table for prediction
-#' data_4prediction <- data.frame(time = 1:10,
-#'                                conc = c(0,5,30,30,0,0,5,30,15,0),
-#'                                replicate= rep("predict", 10),
-#'                                Nsurv = c(20,20,17,16,15,15,15,14,13,12))
-#'
-#' # (5) Predict Nsurv on a new data set
-#' predict_out <- predict_Nsurv(object = out, data_predict = data_4prediction, spaghetti = TRUE)
-#'
-#' }
-#' 
-#' 
-#' @export
-#'
-predict_Nsurv.survFit <- function(object,
-                            data_predict = NULL,
-                            spaghetti = FALSE,
-                            mcmc_size = NULL,
-                            hb_value = TRUE,
-                            hb_valueFORCED = NA,
-                            extend_time = 100,
-                            ...) {
-  x <- object # Renaming to satisfy CRAN checks on S3 methods
-  # arguments should be named the same when declaring a
-  # method and its instantiations
-  
-  
-  if(!("Nsurv" %in% colnames(data_predict))){
-    warning("Please provide a column 'Nsurv' in the 'data_predict' argument to have
-              prediction on the Number of survivor.")
-  }
-  
-  message("Note that computing can be quite long (several minutes).
-  Tips: To reduce that time you can reduce Number of MCMC chains (default mcmc_size is set to 1000).")
-  
-  # Initialisation
-  mcmc <- x$mcmc
-  model_type <- x$model_type
-
-  if(is.null(data_predict)){
-    if("survFitVarExp" %in% class(x)){
-      x_interpolate = data.frame(
-        time = x$jags.data$time_long,
-        conc = x$jags.data$conc_long,
-        replicate = x$jags.data$replicate_long)
-    } else{
-      data_predict = data.frame(
-        time = x$jags.data$time,
-        conc = x$jags.data$conc,
-        replicate = x$jags.data$replicate,
-        Nsurv = x$jags.data$Nsurv)
-      
-      x_interpolate <- predict_interpolate(data_predict,  extend_time = extend_time) %>%
-        dplyr::arrange(replicate, time)
-    }
-  }
-  if(!is.null(data_predict)){
-    x_interpolate <- predict_interpolate(data_predict,  extend_time = extend_time) %>%
-      dplyr::arrange(replicate, time)
-  }
-  
-  df <- data.frame(
-    time = x_interpolate$time,
-    conc = x_interpolate$conc,
-    replicate = x_interpolate$replicate)
-  
-  unique_replicate <- unique(df$replicate)
-  
-  ls_time <- list()
-  ls_conc <- list()
-  
-  for(i in 1:length(unique_replicate)){
-    
-    ls_time[[i]] <- dplyr::filter(df, replicate == unique_replicate[i])$time
-    ls_conc[[i]] <- dplyr::filter(df, replicate == unique_replicate[i])$conc
-    
-  }
-  
-  # ------- Computing
-  
-  mcmc.samples = mcmc
-  
-  if(!is.null(mcmc_size)){
-    reduc_tab = lapply(mcmc.samples, "[", 
-                       seq(1, nrow(mcmc.samples[[1]]), length = mcmc_size),
-                       1:ncol(mcmc.samples[[1]]))
-    mcmc.samples = reduc_tab
-  }
-  
-  mctot = do.call("rbind", mcmc.samples)
-  kd = 10^mctot[, "kd_log10"]
-  
-  if(hb_value == TRUE){
-    # "hb" is not in survFit object of morse <v3.2.0
-    if("hb" %in% colnames(mctot)){
-      hb <- mctot[, "hb"]  
-    } else{ hb <- 10^mctot[, "hb_log10"] }
-  } else if(hb_value == FALSE){
-    if(is.na(hb_valueFORCED)){
-      if(is.na(x$hb_valueFIXED)){
-        stop("Please provide value for `hb` using `hb_valueFORCED`.")
-      } else{
-        hb <- rep(x$hb_valueFIXED, nrow(mctot))
-      } 
-    } else{
-      hb <- rep(hb_valueFORCED, nrow(mctot))
-    }
-  }
-  
-  k = 1:length(unique_replicate)
-  
-  if(model_type == "SD"){
-    kk <- 10^mctot[, "kk_log10"]
-    z <- 10^mctot[, "z_log10"]
-    
-    dtheo = lapply(k, function(kit) { # For each replicate
-      Surv.SD_Cext(Cw = ls_conc[[kit]],
-                   time = ls_time[[kit]],
-                   kk=kk,
-                   kd=kd,
-                   hb=hb,
-                   z=z)
-    })
-    
-  }
-  if(model_type == "IT"){
-    
-    alpha <- 10^mctot[, "alpha_log10"]
-    beta <- 10^mctot[, "beta_log10"]
-    
-    dtheo = lapply(k, function(kit) { # For each replicate
-      Surv.IT_Cext(Cw = ls_conc[[kit]],
-                   time = ls_time[[kit]],
-                   kd = kd,
-                   hb = hb,
-                   alpha = alpha,
-                   beta = beta)
-    })
-  }
-  
-  # Transpose
-  dtheo <- do.call("rbind", lapply(dtheo, t))
-
-  # Computing Nsurv
-  
-  df_mcmc <- as_tibble(do.call("rbind", x$mcmc))
-  NsurvPred_valid <- select(df_mcmc, contains("Nsurv_sim"))
-  NsurvPred_check <- select(df_mcmc, contains("Nsurv_ppc"))
-  
-  if(is.null(data_predict) &
-     # The following condition are always true for survFit done after morse v3.2.0 !
-     ncol(NsurvPred_valid) > 0 &
-     ncol(NsurvPred_check) > 0){
-    
-    df_quantile <- data.frame(
-             time = data_predict$time,
-             conc = data_predict$conc,
-             replicate = data_predict$replicate,
-             Nsurv = data_predict$Nsurv,
-             Nsurv_q50_check = apply(NsurvPred_check, 1, quantile, probs = 0.5, na.rm = TRUE),
-             Nsurv_qinf95_check = apply(NsurvPred_check, 1, quantile, probs = 0.025, na.rm = TRUE),
-             Nsurv_qsup95_check = apply(NsurvPred_check, 1, quantile, probs = 0.975, na.rm = TRUE),
-             Nsurv_q50_valid = apply(NsurvPred_valid, 1, quantile, probs = 0.5, na.rm = TRUE),
-             Nsurv_qinf95_valid = apply(NsurvPred_valid, 1, quantile, probs = 0.025, na.rm = TRUE),
-             Nsurv_qsup95_valid = apply(NsurvPred_valid, 1, quantile, probs = 0.975, na.rm = TRUE))
-    
-  } else{
-      # --------------------
-      
-      df_psurv <- as_tibble(dtheo) %>%
-        mutate(time = df$time,
-               replicate = df$replicate)
-      
-      df_filter <- dplyr::inner_join(df_psurv, data_predict, by = c("replicate", "time")) %>%
-        filter(!is.na(Nsurv)) %>%
-        group_by(replicate) %>%
-        arrange(replicate, time) %>%
-        mutate(Nprec = ifelse(time == min(time), Nsurv, lag(Nsurv)),
-               iter = row_number(),
-               iter_prec = ifelse(time == min(time), iter, lag(iter))) %>%
-        ungroup()
-      
-      mat_psurv <- df_filter %>%
-        select(contains("V"), - Nsurv) %>%
-        as.matrix()
-      
-      ncol_NsurvPred <- ncol(mat_psurv)
-      nrow_NsurvPred <- nrow(mat_psurv)
-      iter = df_filter$iter
-      iter_prec = df_filter$iter_prec
-      
-      NsurvPred_valid <- matrix(ncol = ncol_NsurvPred, nrow = nrow(mat_psurv))
-      
-      Nprec <- cbind(df_filter$Nprec)[, rep(1,ncol_NsurvPred)]
-      
-      mat_psurv_prec = matrix(ncol = ncol_NsurvPred, nrow = nrow_NsurvPred)
-      for(i in 1:nrow_NsurvPred){
-        if(iter[i] == iter_prec[i]){
-          mat_psurv_prec[i,] = mat_psurv[i,]
-        } else{
-          mat_psurv_prec[i,] = mat_psurv[i-1,]
-        }
-      }
-      mat_pSurv_ratio = mat_psurv / mat_psurv_prec
-      
-      NsurvPred_check_vector = rbinom(ncol_NsurvPred*nrow_NsurvPred,
-                                      size = Nprec,
-                                      prob =  mat_pSurv_ratio)
-      NsurvPred_check = matrix(NsurvPred_check_vector, byrow = FALSE, nrow = nrow_NsurvPred)
-      
-      
-      NsurvPred_valid[1, ] = rep(Nprec[1], ncol_NsurvPred)
-      for(i in 2:nrow(mat_psurv)){
-        if(iter[i] == iter_prec[i]){
-          NsurvPred_valid[i,] = NsurvPred_check[i,]
-        } else{
-          NsurvPred_valid[i,] = rbinom(ncol_NsurvPred,
-                                       size = NsurvPred_valid[i-1,],
-                                       prob = mat_pSurv_ratio[i,])
-        }
-      }
-      
-      
-      df_quantile <- data.frame(time = df_filter$time,
-                             conc = df_filter$conc,
-                             replicate = df_filter$replicate,
-                             Nsurv = df_filter$Nsurv,
-                             Nsurv_q50_check = apply(NsurvPred_check, 1, quantile, probs = 0.5, na.rm = TRUE),
-                             Nsurv_qinf95_check = apply(NsurvPred_check, 1, quantile, probs = 0.025, na.rm = TRUE),
-                             Nsurv_qsup95_check = apply(NsurvPred_check, 1, quantile, probs = 0.975, na.rm = TRUE),
-                             Nsurv_q50_valid = apply(NsurvPred_valid, 1, quantile, probs = 0.5, na.rm = TRUE),
-                             Nsurv_qinf95_valid = apply(NsurvPred_valid, 1, quantile, probs = 0.025, na.rm = TRUE),
-                             Nsurv_qsup95_valid = apply(NsurvPred_valid, 1, quantile, probs = 0.975, na.rm = TRUE))
-      
-      } 
-
-  if(spaghetti == TRUE){
-    random_column <- sample(1:ncol(NsurvPred_valid), size = round(10/100 * ncol(NsurvPred_valid)))
-    df_spaghetti <- as_tibble(NsurvPred_valid[, random_column]) %>%
-      mutate(time = data_predict$time,
-             conc = data_predict$conc,
-             replicate = data_predict$replicate,
-             Nsurv = data_predict$Nsurv)
-  } else df_spaghetti <- NULL
-  
-  #ls_check_on_Nsurv <- check_on_Nsurv(df_quantile)
-  
-  return_object <- list(df_quantile = df_quantile,
-                        df_spaghetti = df_spaghetti)
-  
-  class(return_object) <- c(class(return_object), "survFitPredict_Nsurv")
-  
-  return(return_object)
-  
-}
-
-
-
+#' \code{predict_Nsurv} method for \code{survFit} objects
+#' 
+#' It provides the simulated number of survivors for "SD" or "IT" models under
+#' constant or time-variable exposure.
+#' 
+#' @rdname predict
+#' 
+#' @param object an object used to select a method
+#' @param \dots Further arguments to be passed to generic methods
+#' 
+#' @return an object of class \code{predict_Nsurv}.
+#' 
+#' @export
+predict_Nsurv <- function(object, ...){
+  UseMethod("predict_Nsurv")
+}
+
+#' \code{predict_Nsurv} method for \code{survFit} objects
+#'
+#' It provides the simulated number of survivors for "SD" or "IT" models under constant or
+#' time-variable exposure.
+#' 
+#' @rdname predict
+#'
+#' @param object An object of class \code{survFit}
+#' @param data_predict A dataframe with four columns, \code{time}, \code{conc}, \code{replicate}
+#' and \code{Nsurv}, used for prediction. If \code{NULL}, prediction is based on the data set
+#' of the \code{survFit} object used for fitting.
+#' @param spaghetti If \code{TRUE}, return a set of survival curves using
+#' parameters drawn from the posterior distribution.
+#' @param mcmc_size Can be used to reduce the number of MCMC samples in order to speed up
+#' the computation.
+#' @param hb_value If \code{TRUE}, the background mortality \code{hb} is taken into account from the posterior.
+#' If \code{FALSE}, parameter \code{hb} is set to 0. The default is \code{TRUE}.
+#' @param hb_valueFORCED If \code{hb_value} is \code{FALSE}, the fixed value to use for \code{hb}.
+#' @param extend_time Number of time points used to interpolate the exposure profile
+#' between two consecutive observed time points. Default is 100.
+#' @param \dots Further arguments to be passed to generic methods
+#' 
+#' 
+#' @return The function returns an object of class \code{survFitPredict_Nsurv}, which is
+#' a list with the two following \code{data.frame}:
+#' \item{df_quantile}{A \code{data.frame} with 10 columns: \code{time}, \code{conc},
+#' \code{replicate}, \code{Nsurv} (observed number of survivors),
+#' and the median and 95\% credible interval
+#' of the number of survivors computed in two different ways,
+#' referred to as \code{check} and \code{valid}:
+#' \code{Nsurv_q50_check}, \code{Nsurv_qinf95_check},
+#' \code{Nsurv_qsup95_check}, \code{Nsurv_q50_valid}, \code{Nsurv_qinf95_valid},
+#' \code{Nsurv_qsup95_valid}. The \code{_check} columns give the number of survivors
+#' at time \eqn{t} predicted from the observed number
+#' of survivors at time \eqn{t-1},
+#' while the \code{_valid} columns give the number of survivors predicted at time
+#' \eqn{t} from the predicted number of survivors at time \eqn{t-1}.}
+#' \item{df_spaghetti}{\code{NULL} if argument \code{spaghetti = FALSE}. With \code{spaghetti = TRUE},
+#' a \code{data.frame} with a random subset (10\%) of the simulations based on the MCMC
+#' parameters of the \code{survFit} object.}
+#' 
+#' 
+#' @export
+#'
+predict_Nsurv.survFit <- function(object,
+                            data_predict = NULL,
+                            spaghetti = FALSE,
+                            mcmc_size = NULL,
+                            hb_value = TRUE,
+                            hb_valueFORCED = NA,
+                            extend_time = 100,
+                            ...) {
+  x <- object # Renaming to satisfy CRAN checks on S3 methods
+  # arguments should be named the same when declaring a
+  # method and its instantiations
+  
+  
+  if(!("Nsurv" %in% colnames(data_predict))){
+    warning("Please provide a column 'Nsurv' in the 'data_predict' argument to have
+              prediction on the Number of survivor.")
+  }
+  
+  message("Note that computing can be quite long (several minutes).
+  Tips: To reduce that time you can reduce Number of MCMC chains (default mcmc_size is set to 1000).")
+  
+  # Initialisation
+  mcmc <- x$mcmc
+  model_type <- x$model_type
+
+  if(is.null(data_predict)){
+    if("survFitVarExp" %in% class(x)){
+      x_interpolate = data.frame(
+        time = x$jags.data$time_long,
+        conc = x$jags.data$conc_long,
+        replicate = x$jags.data$replicate_long)
+    } else{
+      data_predict = data.frame(
+        time = x$jags.data$time,
+        conc = x$jags.data$conc,
+        replicate = x$jags.data$replicate,
+        Nsurv = x$jags.data$Nsurv)
+      
+      x_interpolate <- predict_interpolate(data_predict,  extend_time = extend_time) %>%
+        dplyr::arrange(replicate, time)
+    }
+  }
+  if(!is.null(data_predict)){
+    x_interpolate <- predict_interpolate(data_predict,  extend_time = extend_time) %>%
+      dplyr::arrange(replicate, time)
+  }
+  
+  df <- data.frame(
+    time = x_interpolate$time,
+    conc = x_interpolate$conc,
+    replicate = x_interpolate$replicate)
+  
+  unique_replicate <- unique(df$replicate)
+  
+  ls_time <- list()
+  ls_conc <- list()
+  
+  for(i in 1:length(unique_replicate)){
+    
+    ls_time[[i]] <- dplyr::filter(df, replicate == unique_replicate[i])$time
+    ls_conc[[i]] <- dplyr::filter(df, replicate == unique_replicate[i])$conc
+    
+  }
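+  # ls_time[[i]] and ls_conc[[i]] now hold the time points and exposure
+  # profile of replicate i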
+  
+  # ------- Computing
+  
+  mcmc.samples = mcmc
+  
+  if(!is.null(mcmc_size)){
+    reduc_tab = lapply(mcmc.samples, "[", 
+                       seq(1, nrow(mcmc.samples[[1]]), length = mcmc_size),
+                       1:ncol(mcmc.samples[[1]]))
+    mcmc.samples = reduc_tab
+  }
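+  # e.g. with 4000 iterations per chain and mcmc_size = 1000 (hypothetical
+  # sizes), the thinning above keeps 1000 evenly spaced rows from each chain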
+  
+  mctot = do.call("rbind", mcmc.samples)
+  kd = 10^mctot[, "kd_log10"]
+  
+  if(hb_value == TRUE){
+    # "hb" is not in survFit object of morse <v3.2.0
+    if("hb" %in% colnames(mctot)){
+      hb <- mctot[, "hb"]  
+    } else{ hb <- 10^mctot[, "hb_log10"] }
+  } else if(hb_value == FALSE){
+    if(is.na(hb_valueFORCED)){
+      if(is.na(x$hb_valueFIXED)){
+        stop("Please provide value for `hb` using `hb_valueFORCED`.")
+      } else{
+        hb <- rep(x$hb_valueFIXED, nrow(mctot))
+      } 
+    } else{
+      hb <- rep(hb_valueFORCED, nrow(mctot))
+    }
+  }
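+  # hb is now a vector with one background-mortality value per MCMC sample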
+  
+  k = 1:length(unique_replicate)
+  
+  if(model_type == "SD"){
+    kk <- 10^mctot[, "kk_log10"]
+    z <- 10^mctot[, "z_log10"]
+    
+    dtheo = lapply(k, function(kit) { # For each replicate
+      Surv.SD_Cext(Cw = ls_conc[[kit]],
+                   time = ls_time[[kit]],
+                   kk=kk,
+                   kd=kd,
+                   hb=hb,
+                   z=z)
+    })
+    
+  }
+  if(model_type == "IT"){
+    
+    alpha <- 10^mctot[, "alpha_log10"]
+    beta <- 10^mctot[, "beta_log10"]
+    
+    dtheo = lapply(k, function(kit) { # For each replicate
+      Surv.IT_Cext(Cw = ls_conc[[kit]],
+                   time = ls_time[[kit]],
+                   kd = kd,
+                   hb = hb,
+                   alpha = alpha,
+                   beta = beta)
+    })
+  }
+  
+  # Transpose
+  dtheo <- do.call("rbind", lapply(dtheo, t))
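+  # after transposing and stacking, rows of dtheo follow the interpolated time
+  # points of each replicate (matching df) and columns follow the MCMC samples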
+
+  # Computing Nsurv
+  
+  df_mcmc <- as_tibble(do.call("rbind", x$mcmc))
+  NsurvPred_valid <- select(df_mcmc, contains("Nsurv_sim"))
+  NsurvPred_check <- select(df_mcmc, contains("Nsurv_ppc"))
+  
+  if(is.null(data_predict) &&
+     # The following conditions are always true for survFit objects produced from morse v3.2.0 onwards
+     ncol(NsurvPred_valid) > 0 &&
+     ncol(NsurvPred_check) > 0){
+    
+    df_quantile <- data.frame(
+             time = data_predict$time,
+             conc = data_predict$conc,
+             replicate = data_predict$replicate,
+             Nsurv = data_predict$Nsurv,
+             Nsurv_q50_check = apply(NsurvPred_check, 1, quantile, probs = 0.5, na.rm = TRUE),
+             Nsurv_qinf95_check = apply(NsurvPred_check, 1, quantile, probs = 0.025, na.rm = TRUE),
+             Nsurv_qsup95_check = apply(NsurvPred_check, 1, quantile, probs = 0.975, na.rm = TRUE),
+             Nsurv_q50_valid = apply(NsurvPred_valid, 1, quantile, probs = 0.5, na.rm = TRUE),
+             Nsurv_qinf95_valid = apply(NsurvPred_valid, 1, quantile, probs = 0.025, na.rm = TRUE),
+             Nsurv_qsup95_valid = apply(NsurvPred_valid, 1, quantile, probs = 0.975, na.rm = TRUE))
+    
+  } else{
+      # --------------------
+      
+      df_psurv <- as_tibble(dtheo) %>%
+        mutate(time = df$time,
+               replicate = df$replicate)
+      
+      df_filter <- dplyr::inner_join(df_psurv, data_predict, by = c("replicate", "time")) %>%
+        filter(!is.na(Nsurv)) %>%
+        group_by(replicate) %>%
+        arrange(replicate, time) %>%
+        mutate(Nprec = ifelse(time == min(time), Nsurv, lag(Nsurv)),
+               iter = row_number(),
+               iter_prec = ifelse(time == min(time), iter, lag(iter))) %>%
+        ungroup()
+      
+      mat_psurv <- df_filter %>%
+        select(contains("V"), - Nsurv) %>%
+        as.matrix()
+      
+      ncol_NsurvPred <- ncol(mat_psurv)
+      nrow_NsurvPred <- nrow(mat_psurv)
+      iter = df_filter$iter
+      iter_prec = df_filter$iter_prec
+      
+      NsurvPred_valid <- matrix(ncol = ncol_NsurvPred, nrow = nrow(mat_psurv))
+      
+      Nprec <- cbind(df_filter$Nprec)[, rep(1,ncol_NsurvPred)]
+      
+      mat_psurv_prec = matrix(ncol = ncol_NsurvPred, nrow = nrow_NsurvPred)
+      for(i in 1:nrow_NsurvPred){
+        if(iter[i] == iter_prec[i]){
+          mat_psurv_prec[i,] = mat_psurv[i,]
+        } else{
+          mat_psurv_prec[i,] = mat_psurv[i-1,]
+        }
+      }
+      mat_pSurv_ratio = mat_psurv / mat_psurv_prec
+      
+      NsurvPred_check_vector <- rbinom(ncol_NsurvPred * nrow_NsurvPred,
+                                       size = Nprec,
+                                       prob = mat_pSurv_ratio)
+      NsurvPred_check <- matrix(NsurvPred_check_vector, byrow = FALSE, nrow = nrow_NsurvPred)
+      
+      
+      NsurvPred_valid[1, ] = rep(Nprec[1], ncol_NsurvPred)
+      for(i in 2:nrow(mat_psurv)){
+        if(iter[i] == iter_prec[i]){
+          NsurvPred_valid[i,] = NsurvPred_check[i,]
+        } else{
+          NsurvPred_valid[i,] = rbinom(ncol_NsurvPred,
+                                       size = NsurvPred_valid[i-1,],
+                                       prob = mat_pSurv_ratio[i,])
+        }
+      }
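+      # A toy sketch of the two schemes (hypothetical values): with survival
+      # probabilities S = c(1, 0.8, 0.6) and observed counts c(20, 18, 15),
+      # the "check" draw at the third time point is rbinom(1, 18, 0.6/0.8),
+      # conditioning on the observed 18 survivors, while the "valid" draw
+      # uses the survivors simulated at the second time point instead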
+      
+      
+      df_quantile <- data.frame(time = df_filter$time,
+                             conc = df_filter$conc,
+                             replicate = df_filter$replicate,
+                             Nsurv = df_filter$Nsurv,
+                             Nsurv_q50_check = apply(NsurvPred_check, 1, quantile, probs = 0.5, na.rm = TRUE),
+                             Nsurv_qinf95_check = apply(NsurvPred_check, 1, quantile, probs = 0.025, na.rm = TRUE),
+                             Nsurv_qsup95_check = apply(NsurvPred_check, 1, quantile, probs = 0.975, na.rm = TRUE),
+                             Nsurv_q50_valid = apply(NsurvPred_valid, 1, quantile, probs = 0.5, na.rm = TRUE),
+                             Nsurv_qinf95_valid = apply(NsurvPred_valid, 1, quantile, probs = 0.025, na.rm = TRUE),
+                             Nsurv_qsup95_valid = apply(NsurvPred_valid, 1, quantile, probs = 0.975, na.rm = TRUE))
+      
+      } 
+
+  if(spaghetti == TRUE){
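+    # keep a random 10% of the simulated trajectories to limit object size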
+    random_column <- sample(1:ncol(NsurvPred_valid), size = round(10/100 * ncol(NsurvPred_valid)))
+    df_spaghetti <- as_tibble(NsurvPred_valid[, random_column]) %>%
+      mutate(time = data_predict$time,
+             conc = data_predict$conc,
+             replicate = data_predict$replicate,
+             Nsurv = data_predict$Nsurv)
+  } else df_spaghetti <- NULL
+  
+  return_object <- list(df_quantile = df_quantile,
+                        df_spaghetti = df_spaghetti)
+  
+  class(return_object) <- c(class(return_object), "survFitPredict_Nsurv")
+  
+  return(return_object)
+  
+}
diff --git a/R/predict_ode.survFit.R b/R/predict_ode.survFit.R
index c8e62721076620ddb57b0c5e93b1d142d938ae0d..c140fb03a23513ba9917c1380d6ad2ee4539b58d 100644
--- a/R/predict_ode.survFit.R
+++ b/R/predict_ode.survFit.R
@@ -1,349 +1,326 @@
-#' Predict method for \code{survFit} objects
-#' 
-#' This is a \code{method} to replace function \code{predict} used on \code{survFit}
-#' object when computing issues happen. \code{predict_ode} uses the \code{deSolve}
-#' library to improve robustness. However, time to compute may be longer.
-#' 
-#' 
-#' @param object an object used to select a method \code{ppc}
-#' @param \dots Further arguments to be passed to generic methods
-#' 
-#' @return an object of class \code{predict_ode}
-#' 
-#' @export
-predict_ode <- function(object, ...){
-  UseMethod("predict_ode")
-}
-
-#' Predict method for \code{survFit} objects
-#'
-#' This is the generic \code{predict} S3 method for the \code{survFit} class.
-#' It provides predicted survival rate for "SD" or "IT" models under constant or time-variable exposure.
-#'
-#' @param object An object of class \code{survFit}.
-#' @param data_predict A dataframe with three columns \code{time}, \code{conc} and \code{replicate}
-#'  used for prediction. If \code{NULL}, prediction is based on \code{x} object of 
-#'  class \code{survFit} used for fitting.
-#' @param spaghetti If \code{TRUE}, return a set of survival curves using
-#' parameters drawn from the posterior distribution.
-#' @param mcmc_size Can be used to reduce the number of mcmc samples in order to speed up
-#'  the computation. \code{mcmc_size} is the number of selected iterations for one chain. Default
-#'  is 1000. If all MCMC is wanted, set argument to \code{NULL}.
-#' @param hb_value If \code{TRUE}, the background mortality \code{hb} is taken into account from the posterior.
-#' If \code{FALSE}, parameter \code{hb} is set to a fixed value. The default is \code{TRUE}.
-#' @param interpolate_length Length of the time sequence for which output is wanted.
-#' @param interpolate_method The interpolation method for concentration. See package \code{deSolve} for details.
-#' Default is \code{linear}.
-#' @param  hb_valueFORCED If \code{hb_value} is \code{FALSE}, it fix \code{hb}.
-#' @param \dots Further arguments to be passed to generic methods
-#' 
-#' @return a \code{list} of \code{data.frame} with the quantiles of outputs in
-#' \code{df_quantiles} or all the MCMC chaines \code{df_spaghetti}
-#' 
-#' @examples 
-#'
-#' # (1) Load the survival data
-#' data("propiconazole_pulse_exposure")
-#'
-#' # (2) Create an object of class "survData"
-#' dataset <- survData(propiconazole_pulse_exposure)
-#'
-#' \donttest{
-#' # (3) Run the survFit function
-#' out <- survFit(dataset , model_type = "SD")
-#'
-#' # (4) Create a new data table for prediction
-#' data_4prediction <- data.frame(time = 1:10,
-#'                                conc = c(0,5,30,30,0,0,5,30,15,0),
-#'                                replicate= rep("predict", 10))
-#'
-#' # (5) Predict on a new data set
-#' predict_out <- predict_ode(object = out, data_predict = data_4prediction,
-#'                            mcmc_size = 1000, spaghetti = TRUE)
-#'
-#' }
-#' 
-#' @import deSolve
-#' @importFrom stats approxfun
-#' 
-#' @export
-#'
-predict_ode.survFit <- function(object,
-                                data_predict = NULL,
-                                spaghetti = FALSE,
-                                mcmc_size = 1000,
-                                hb_value = TRUE,
-                                interpolate_length = 100,
-                                interpolate_method = "linear",
-                                hb_valueFORCED = NA,
-                                ...) {
-  x <- object # Renaming to satisfy CRAN checks on S3 methods
-  # arguments should be named the same when declaring a
-  # method and its instantiations
-  
-  # Initialisation
-  mcmc <- x$mcmc
-  model_type <- x$model_type
-  
-  if(is.null(data_predict)){
-    x_interpolate = data.frame(
-        time = x$jags.data$time,
-        conc = x$jags.data$conc,
-        replicate = x$jags.data$replicate)
-  }
-  if(!is.null(data_predict)){
-    x_interpolate <- data_predict
-  }
-
-  df <- data.frame(
-    time = x_interpolate$time,
-    conc = x_interpolate$conc,
-    replicate = x_interpolate$replicate)
-  
-  unique_replicate <- unique(df$replicate)
-  
-  ls_time <- list()
-  ls_conc <- list()
-  
-  for(i in 1:length(unique_replicate)){
-    
-    ls_time[[i]] <- dplyr::filter(df, replicate == unique_replicate[i])$time
-    ls_conc[[i]] <- dplyr::filter(df, replicate == unique_replicate[i])$conc
-    
-  }
-  # ------- Computing
-  
-  mcmc.samples = mcmc
-  
-  if(!is.null(mcmc_size)){
-    reduc_tab = lapply(mcmc.samples, "[", 
-                       seq(1, nrow(mcmc.samples[[1]]), length = mcmc_size),
-                       1:ncol(mcmc.samples[[1]]))
-    mcmc.samples = reduc_tab
-  }
-  
-  mctot = do.call("rbind", mcmc.samples)
-  #if(is.null(mcmc_size)){
-  mcmc_size = nrow(mctot)
-  #}
-  
-  kd = 10^mctot[, "kd_log10"]
-  
-  if(hb_value == TRUE){
-    hb <- 10^mctot[, "hb_log10"]
-  } else if(hb_value == FALSE){
-    if(is.na(hb_valueFORCED)){
-      if(is.na(x$hb_valueFIXED)){
-        stop("Please provide value for `hb` using `hb_valueFORCED`.")
-      } else{
-        hb <- rep(x$hb_valueFIXED, nrow(mctot))
-      } 
-    } else{
-      hb <- rep(hb_valueFORCED, nrow(mctot))
-    }
-  }
-  
-  k = 1:length(unique_replicate)
-  
-  if(model_type == "SD"){
-    kk <- 10^mctot[, "kk_log10"]
-    z <- 10^mctot[, "z_log10"]
-    
-    dtheo = lapply(k, function(kit) { # For each replicate
-      SurvSD_ode(Cw = ls_conc[[kit]],
-                 time = ls_time[[kit]],
-                 replicate = unique_replicate[kit],
-                 kk=kk,
-                 kd=kd,
-                 hb=hb,
-                 z=z,
-                 mcmc_size = mcmc_size,
-                 interpolate_length = interpolate_length,
-                 interpolate_method = interpolate_method)
-    })
-    
-  }
-  if(model_type == "IT"){
-    
-    alpha <- 10^mctot[, "alpha_log10"]
-    beta <- 10^mctot[, "beta_log10"]
-    
-    dtheo = lapply(k, function(kit) { # For each replicate
-      SurvIT_ode(Cw = ls_conc[[kit]],
-                 time = ls_time[[kit]],
-                 replicate = unique_replicate[kit],
-                 kd = kd,
-                 hb = hb,
-                 alpha = alpha,
-                 beta = beta,
-                 mcmc_size = mcmc_size,
-                 interpolate_length = interpolate_length,
-                 interpolate_method = interpolate_method)
-    })
-    
-  }
-  
-  # Transpose
-  df_theo <- do.call("rbind", dtheo)
-  
-  df_quantile = select(df_theo, time, conc, replicate, q50, qinf95, qsup95)
-  
-  if(spaghetti == TRUE){
-    df_spaghetti <- df_theo
-  } else df_spaghetti <- NULL
-  
-  return_object <- list(df_quantile = df_quantile,
-                        df_spaghetti = df_spaghetti)
-  
-  class(return_object) <- c(class(return_object), "survFitPredict")
-  
-  return(return_object)
-  
-}
-
-# Survival function for "IT" model with external concentration changing with time
-#
-# @param Cw A scalar of external concentration
-# @param time A vector of time
-# @param kk a vector of parameter
-# @param kd a vector of parameter
-# @param z a vector of parameter
-# @param hb a vector of parameter
-# 
-#
-# @return A matrix generate with coda.samples() function
-#
-
-SurvSD_ode <- function(Cw, time, replicate, kk, kd, z, hb, mcmc_size = 1000, interpolate_length = NULL, interpolate_method=c("linear","constant")) {
-  interpolate_method <- match.arg(interpolate_method)
-  
-  ## external signal with several rectangle impulses
-  signal <- data.frame(times=time,import=Cw)
-  if(!is.null(interpolate_length)){
-    times <- seq(min(time), max(time), length = interpolate_length)
-  } else{
-    times <- signal$times
-  }
-  
-  xstart <- c(rep(c(D=0),mcmc_size),rep(c(H=0),mcmc_size))
-  # ordering of parameters required by compiled function
-  parms <- c(mcmc_size,kd,hb,z,kk)
-  # solve model
-  on.exit(.C("gutsredsd_free")) # clean up
-  deSolve::ode(y=xstart,
-               times=times,
-               parms=parms,
-               method="lsoda",
-               dllname="morse",
-               initfunc="gutsredsd_init",
-               func="gutsredsd_func",
-               initforc="gutsredsd_forc",
-               forcings=signal,
-               fcontrol=list(method=interpolate_method,rule=2,ties="ordered"),
-               nout=1
-  ) -> out
-  
-  dtheo <- exp(-out[,grep("H",colnames(out))])
-
-  # Manage vector case
-  if(mcmc_size == 1){
-    q50 = dtheo
-    qinf95 = dtheo
-    qsup95 = dtheo
-  } else{
-    qs <- apply(as.matrix(dtheo), 1, quantile, probs=c(0.5,0.025,0.975), names=FALSE, na.rm=TRUE)
-    q50 = qs[1,]
-    qinf95 = qs[2,]
-    qsup95 = qs[3,]
-  }
-  
-  dtheo <- as.data.frame(dtheo)
-  names(dtheo) <- paste0("H",seq(1,mcmc_size))
-  dtheo <- dtheo %>%
-    dplyr::mutate(time = times,
-           conc = out[,ncol(out)],
-           replicate = c(replicate),
-           q50 = q50,
-           qinf95 = qinf95,
-           qsup95 = qsup95)
-  
-  return(dtheo)
-}
-
-# Survival function for "IT" model with external concentration changing with time
-#
-# @param Cw A vector of external concentration
-# @param time A vector of time
-# @param replicate A scalar of char
-# @param kk a vector of parameter
-# @param kd a vector of parameter
-# @param z a vector of parameter
-# @param hb a vector of parameter
-# 
-#
-# @return A matrix generate with coda.samples() function
-#
-
-SurvIT_ode <- function(Cw, time, replicate, kd, hb, alpha, beta, mcmc_size = NULL, interpolate_length = NULL, interpolate_method=c("linear","constant")){
-  interpolate_method <- match.arg(interpolate_method)
-  
-  ## external signal with several rectangle impulses
-  signal <- data.frame(times=time,import=Cw)
-  if(!is.null(interpolate_length)){
-    times <- seq(min(time), max(time), length = interpolate_length)
-  } else{
-    times <- signal$times
-  }
-  
-  ## The parameters
-  parms  <- c(mcmc_size,kd,hb)
-  
-  ## Start values for steady state
-  xstart <- c(rep(c(D=0),mcmc_size),rep(c(H=0),mcmc_size))
-  
-  ## Solve model
-  on.exit(.C("gutsredit_free")) # clean up
-  deSolve::ode(y=xstart,
-               times=times,
-               parms=parms,
-               method="lsoda",
-               dllname="morse",
-               initfunc="gutsredit_init",
-               func="gutsredit_func",
-               initforc="gutsredit_forc",
-               forcings=signal,
-               fcontrol=list(method=interpolate_method,rule=2,ties="ordered"),
-               nout=1
-  ) -> out
-
-  D <- out[,grep("D",colnames(out))]
-  cumMax_D <- if(is.null(dim(D))) cummax(D) else apply(D, 2, cummax)
-  thresholdIT <- t(1 / (1 + (t(cumMax_D) / alpha)^(-beta)))
-  
-  dtheo <- (1 - thresholdIT) * exp(times %*% t(-hb))
-
-  # Manage vector case
-  if(mcmc_size == 1){
-    q50 = dtheo
-    qinf95 = dtheo
-    qsup95 = dtheo
-  } else{
-    qs <- apply(as.matrix(dtheo), 1, quantile, probs=c(0.5,0.025,0.975), names=FALSE, na.rm=TRUE)
-    q50 = qs[1,]
-    qinf95 = qs[2,]
-    qsup95 = qs[3,]
-  }
-  
-  dtheo <- as.data.frame(dtheo)
-  names(dtheo) <- paste0("H",seq(1,mcmc_size))
-  dtheo <- dtheo %>%
-    dplyr::mutate(time = out[, "time"],
-            conc = out[,ncol(out)],
-            replicate = c(replicate),
-            q50 = q50,
-            qinf95 = qinf95,
-            qsup95 = qsup95)
-
-  return(dtheo)
-  
-}
+#' Predict method for \code{survFit} objects
+#' 
+#' This method replaces the function \code{predict} used on \code{survFit}
+#' objects when computational issues arise. \code{predict_ode} uses the \code{deSolve}
+#' package to improve robustness, at the cost of a longer computation time.
+#' 
+#' 
+#' @param object an object used to select the method \code{predict_ode}
+#' @param \dots Further arguments to be passed to generic methods
+#' 
+#' @return an object of class \code{survFitPredict}
+#' 
+#' @export
+predict_ode <- function(object, ...){
+  UseMethod("predict_ode")
+}
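+
+# Minimal usage sketch (not run), mirroring the survFit examples used elsewhere
+# in the package; `propiconazole` is an example dataset shipped with morse:
+#   data("propiconazole")
+#   dataset <- survData(propiconazole)
+#   out_SD <- survFit(dataset, model_type = "SD")
+#   predict_ode(out_SD)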
+
+#' Predict method for \code{survFit} objects
+#'
+#' This is the \code{predict_ode} S3 method for the \code{survFit} class. It
+#' provides the predicted survival rate for "SD" or "IT" models under constant or time-variable exposure.
+#'
+#' @param object An object of class \code{survFit}.
+#' @param data_predict A data.frame with three columns, \code{time}, \code{conc} and \code{replicate},
+#'  used for prediction. If \code{NULL}, prediction is based on the \code{survFit}
+#'  object used for fitting.
+#' @param spaghetti If \code{TRUE}, return a set of survival curves using
+#' parameters drawn from the posterior distribution.
+#' @param mcmc_size Can be used to reduce the number of MCMC samples in order to speed up
+#'  the computation. \code{mcmc_size} is the number of selected iterations per chain. The
+#'  default is 1000. To use the full MCMC sample, set this argument to \code{NULL}.
+#' @param hb_value If \code{TRUE}, the background mortality \code{hb} is taken from the posterior distribution.
+#' If \code{FALSE}, parameter \code{hb} is set to a fixed value. The default is \code{TRUE}.
+#' @param interpolate_length Length of the time sequence for which output is wanted.
+#' @param interpolate_method The interpolation method for concentration. See package \code{deSolve} for details.
+#' Default is \code{linear}.
+#' @param hb_valueFORCED If \code{hb_value} is \code{FALSE}, this fixes the value of \code{hb}.
+#' @param \dots Further arguments to be passed to generic methods
+#' 
+#' @return a \code{list} of two \code{data.frame}: \code{df_quantile} with the quantiles
+#' of the predicted survival probability, and \code{df_spaghetti} with all the MCMC
+#' chains (\code{NULL} unless \code{spaghetti = TRUE})
+#' 
+#' @import deSolve
+#' @importFrom stats approxfun
+#' 
+#' @export
+#'
+predict_ode.survFit <- function(object,
+                                data_predict = NULL,
+                                spaghetti = FALSE,
+                                mcmc_size = 1000,
+                                hb_value = TRUE,
+                                interpolate_length = 100,
+                                interpolate_method = "linear",
+                                hb_valueFORCED = NA,
+                                ...) {
+  x <- object # Renaming to satisfy CRAN checks on S3 methods
+  # arguments should be named the same when declaring a
+  # method and its instantiations
+  
+  # Initialisation
+  mcmc <- x$mcmc
+  model_type <- x$model_type
+  
+  if(is.null(data_predict)){
+    x_interpolate <- data.frame(
+      time = x$jags.data$time,
+      conc = x$jags.data$conc,
+      replicate = x$jags.data$replicate)
+  } else{
+    x_interpolate <- data_predict
+  }
+
+  df <- data.frame(
+    time = x_interpolate$time,
+    conc = x_interpolate$conc,
+    replicate = x_interpolate$replicate)
+  
+  unique_replicate <- unique(df$replicate)
+  
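+  # Split time and concentration by replicate so that each exposure profile
+  # is solved independently below.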
+  ls_time <- list()
+  ls_conc <- list()
+  
+  for(i in 1:length(unique_replicate)){
+    
+    ls_time[[i]] <- dplyr::filter(df, replicate == unique_replicate[i])$time
+    ls_conc[[i]] <- dplyr::filter(df, replicate == unique_replicate[i])$conc
+    
+  }
+  # ------- Computing
+  
+  mcmc.samples = mcmc
+  
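+  # Optionally thin each chain to `mcmc_size` evenly spaced iterations;
+  # the chains are then stacked row-wise.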
+  if(!is.null(mcmc_size)){
+    reduc_tab = lapply(mcmc.samples, "[", 
+                       seq(1, nrow(mcmc.samples[[1]]), length = mcmc_size),
+                       1:ncol(mcmc.samples[[1]]))
+    mcmc.samples = reduc_tab
+  }
+  
+  mctot = do.call("rbind", mcmc.samples)
+  # from here on, mcmc_size is the total number of draws pooled across chains
+  mcmc_size = nrow(mctot)
+  
+  kd = 10^mctot[, "kd_log10"]
+  
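+  # Background mortality: posterior draws of hb, or a forced/fixed value
+  # replicated across draws.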
+  if(hb_value == TRUE){
+    hb <- 10^mctot[, "hb_log10"]
+  } else if(hb_value == FALSE){
+    if(is.na(hb_valueFORCED)){
+      if(is.na(x$hb_valueFIXED)){
+        stop("Please provide value for `hb` using `hb_valueFORCED`.")
+      } else{
+        hb <- rep(x$hb_valueFIXED, nrow(mctot))
+      } 
+    } else{
+      hb <- rep(hb_valueFORCED, nrow(mctot))
+    }
+  }
+  
+  k = 1:length(unique_replicate)
+  
+  if(model_type == "SD"){
+    kk <- 10^mctot[, "kk_log10"]
+    z <- 10^mctot[, "z_log10"]
+    
+    dtheo = lapply(k, function(kit) { # For each replicate
+      SurvSD_ode(Cw = ls_conc[[kit]],
+                 time = ls_time[[kit]],
+                 replicate = unique_replicate[kit],
+                 kk=kk,
+                 kd=kd,
+                 hb=hb,
+                 z=z,
+                 mcmc_size = mcmc_size,
+                 interpolate_length = interpolate_length,
+                 interpolate_method = interpolate_method)
+    })
+    
+  }
+  if(model_type == "IT"){
+    
+    alpha <- 10^mctot[, "alpha_log10"]
+    beta <- 10^mctot[, "beta_log10"]
+    
+    dtheo = lapply(k, function(kit) { # For each replicate
+      SurvIT_ode(Cw = ls_conc[[kit]],
+                 time = ls_time[[kit]],
+                 replicate = unique_replicate[kit],
+                 kd = kd,
+                 hb = hb,
+                 alpha = alpha,
+                 beta = beta,
+                 mcmc_size = mcmc_size,
+                 interpolate_length = interpolate_length,
+                 interpolate_method = interpolate_method)
+    })
+    
+  }
+  
+  # Stack the per-replicate predictions row-wise
+  df_theo <- do.call("rbind", dtheo)
+  
+  df_quantile = dplyr::select(df_theo, time, conc, replicate, q50, qinf95, qsup95)
+  
+  if(spaghetti == TRUE){
+    df_spaghetti <- df_theo
+  } else df_spaghetti <- NULL
+  
+  return_object <- list(df_quantile = df_quantile,
+                        df_spaghetti = df_spaghetti)
+  
+  class(return_object) <- c(class(return_object), "survFitPredict")
+  
+  return(return_object)
+  
+}
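+
+# Hedged sketch of prediction under a new exposure profile: `data_predict`
+# needs only the three columns documented above. The profile values are
+# illustrative, and `fit` stands for any fitted survFit object:
+#   new_exposure <- data.frame(time = c(0, 1, 2, 3, 4),
+#                              conc = c(0, 10, 10, 0, 0),
+#                              replicate = "pulse_1")
+#   predict_ode(fit, data_predict = new_exposure)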
+
+# Survival function for "IT" model with external concentration changing with time
+#
+# @param Cw A scalar of external concentration
+# @param time A vector of time
+# @param kk a vector of parameter
+# @param kd a vector of parameter
+# @param z a vector of parameter
+# @param hb a vector of parameter
+# 
+#
+# @return A matrix generate with coda.samples() function
+#
+
+SurvSD_ode <- function(Cw, time, replicate, kk, kd, z, hb, mcmc_size = 1000, interpolate_length = NULL, interpolate_method=c("linear","constant")) {
+  interpolate_method <- match.arg(interpolate_method)
+  
+  ## external signal with several rectangle impulses
+  signal <- data.frame(times=time,import=Cw)
+  if(!is.null(interpolate_length)){
+    times <- seq(min(time), max(time), length = interpolate_length)
+  } else{
+    times <- signal$times
+  }
+  
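+  # The ODE system is vectorised over posterior draws: the state vector stacks
+  # mcmc_size scaled damage variables D, then mcmc_size cumulative hazards H.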
+  xstart <- c(rep(c(D=0),mcmc_size),rep(c(H=0),mcmc_size))
+  # ordering of parameters required by compiled function
+  parms <- c(mcmc_size,kd,hb,z,kk)
+  # solve model
+  on.exit(.C("gutsredsd_free")) # clean up
+  deSolve::ode(y=xstart,
+               times=times,
+               parms=parms,
+               method="lsoda",
+               dllname="morse",
+               initfunc="gutsredsd_init",
+               func="gutsredsd_func",
+               initforc="gutsredsd_forc",
+               forcings=signal,
+               fcontrol=list(method=interpolate_method,rule=2,ties="ordered"),
+               nout=1
+  ) -> out
+  
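+  # SD model: survival probability is exp(-H), with H the cumulative hazard
+  # integrated by the solver for each posterior draw.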
+  dtheo <- exp(-out[,grep("H",colnames(out))])
+
+  # Manage vector case
+  if(mcmc_size == 1){
+    q50 = dtheo
+    qinf95 = dtheo
+    qsup95 = dtheo
+  } else{
+    qs <- apply(as.matrix(dtheo), 1, quantile, probs=c(0.5,0.025,0.975), names=FALSE, na.rm=TRUE)
+    q50 = qs[1,]
+    qinf95 = qs[2,]
+    qsup95 = qs[3,]
+  }
+  
+  dtheo <- as.data.frame(dtheo)
+  names(dtheo) <- paste0("H",seq(1,mcmc_size))
+  dtheo <- dtheo %>%
+    dplyr::mutate(time = times,
+           conc = out[,ncol(out)],
+           replicate = c(replicate),
+           q50 = q50,
+           qinf95 = qinf95,
+           qsup95 = qsup95)
+  
+  return(dtheo)
+}
+
+# Survival function for "IT" model with external concentration changing with time
+#
+# @param Cw A vector of external concentration
+# @param time A vector of time
+# @param replicate A scalar of char
+# @param kk a vector of parameter
+# @param kd a vector of parameter
+# @param z a vector of parameter
+# @param hb a vector of parameter
+# 
+#
+# @return A matrix generate with coda.samples() function
+#
+
+SurvIT_ode <- function(Cw, time, replicate, kd, hb, alpha, beta, mcmc_size = NULL, interpolate_length = NULL, interpolate_method=c("linear","constant")){
+  interpolate_method <- match.arg(interpolate_method)
+  
+  ## external signal with several rectangle impulses
+  signal <- data.frame(times=time,import=Cw)
+  if(!is.null(interpolate_length)){
+    times <- seq(min(time), max(time), length = interpolate_length)
+  } else{
+    times <- signal$times
+  }
+  
+  ## The parameters
+  parms  <- c(mcmc_size,kd,hb)
+  
+  ## Initial values for the state variables
+  xstart <- c(rep(c(D=0),mcmc_size),rep(c(H=0),mcmc_size))
+  
+  ## Solve model
+  on.exit(.C("gutsredit_free")) # clean up
+  deSolve::ode(y=xstart,
+               times=times,
+               parms=parms,
+               method="lsoda",
+               dllname="morse",
+               initfunc="gutsredit_init",
+               func="gutsredit_func",
+               initforc="gutsredit_forc",
+               forcings=signal,
+               fcontrol=list(method=interpolate_method,rule=2,ties="ordered"),
+               nout=1
+  ) -> out
+
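+  # IT model: an individual survives as long as its maximal damage so far stays
+  # below its log-logistic threshold; multiply by background survival exp(-hb*t).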
+  D <- out[,grep("D",colnames(out))]
+  cumMax_D <- if(is.null(dim(D))) cummax(D) else apply(D, 2, cummax)
+  thresholdIT <- t(1 / (1 + (t(cumMax_D) / alpha)^(-beta)))
+  
+  dtheo <- (1 - thresholdIT) * exp(times %*% t(-hb))
+
+  # Manage vector case
+  if(mcmc_size == 1){
+    q50 = dtheo
+    qinf95 = dtheo
+    qsup95 = dtheo
+  } else{
+    qs <- apply(as.matrix(dtheo), 1, quantile, probs=c(0.5,0.025,0.975), names=FALSE, na.rm=TRUE)
+    q50 = qs[1,]
+    qinf95 = qs[2,]
+    qsup95 = qs[3,]
+  }
+  
+  dtheo <- as.data.frame(dtheo)
+  names(dtheo) <- paste0("H",seq(1,mcmc_size))
+  dtheo <- dtheo %>%
+    dplyr::mutate(time = out[, "time"],
+            conc = out[,ncol(out)],
+            replicate = c(replicate),
+            q50 = q50,
+            qinf95 = qinf95,
+            qsup95 = qsup95)
+
+  return(dtheo)
+  
+}
diff --git a/R/print.reproFitTT.R b/R/print.reproFitTT.R
index dca04d568157e89a51bb4c898c7fba49e62820b0..f7a26e3709e51dc8233d23b53496cd5047120f88 100644
--- a/R/print.reproFitTT.R
+++ b/R/print.reproFitTT.R
@@ -1,46 +1,29 @@
-#' Print of \code{reproFitTT} object
-#' 
-#' This is the generic \code{print} S3 method for the \code{reproFitTT} class.
-#' It prints the underlying JAGS model and some information on the Bayesian 
-#' inference procedure.
-#' 
-#' @param x An object of class \code{reproFitTT}
-#' @param \dots Further arguments to be passed to generic methods
-#' 
-#' @return print the model text and the Jags Computing information
-#' 
-#' @examples
-#' # (1) Load the data
-#' data(cadmium1)
-#' 
-#' # (2) Create an object of class 'reproData'
-#' cadmium1 <- reproData(cadmium1)
-#' 
-#' \donttest{
-#' # (3) Run the reproFitTT function with the log-logistic
-#' # model
-#' out <- reproFitTT(cadmium1, ecx = c(5, 10, 15, 20, 30, 50, 80),
-#' quiet = TRUE)
-#' 
-#' # (4) Print the reproFitTT object
-#' print(out)
-#' }
-#' 
-#' @keywords print
-#' 
-#' @export
-print.reproFitTT <- function(x, ...) {
-  # print the model text and the Jags Computing information
-  # for an object of class reproFitTT
-  
-  # M.C.M.C. informations
-  cat("Model:\n")
-  print(x$model)
-  cat("\nComputing information:\n\n")
-  cat("\n", "Iterations = ", x$n.iter[["start"]], ":",
-      x$n.iter[["end"]], "\n", sep = "")
-  cat("Thinning interval =", x$n.thin, "\n")
-  cat("Number of chains =", x$n.chains, "\n")
-  cat("Sample size per chain =",
-      (x$n.iter[["end"]] - x$n.iter[["start"]]) / x$n.thin + 1, "\n")
-}
+#' Print of \code{reproFitTT} object
+#' 
+#' This is the generic \code{print} S3 method for the \code{reproFitTT} class.
+#' It prints the underlying JAGS model and some information on the Bayesian 
+#' inference procedure.
+#' 
+#' @param x An object of class \code{reproFitTT}
+#' @param \dots Further arguments to be passed to generic methods
+#' 
+#' @return prints the model text and the JAGS computing information
+#' 
+#' @keywords print
+#' 
+#' @export
+print.reproFitTT <- function(x, ...) {
+  # print the model text and the JAGS computing information
+  # for an object of class reproFitTT
+  
+  # M.C.M.C. information
+  cat("Model:\n")
+  print(x$model)
+  cat("\nComputing information:\n\n")
+  cat("\n", "Iterations = ", x$n.iter[["start"]], ":",
+      x$n.iter[["end"]], "\n", sep = "")
+  cat("Thinning interval =", x$n.thin, "\n")
+  cat("Number of chains =", x$n.chains, "\n")
+  cat("Sample size per chain =",
+      (x$n.iter[["end"]] - x$n.iter[["start"]]) / x$n.thin + 1, "\n")
+}
diff --git a/R/print.survFitCstExp.R b/R/print.survFitCstExp.R
index 0b8814de431f88659b2d464edd2cd82053357b7e..fee0b867f1fbb8f2fac27e8951574fbd1f374f15 100644
--- a/R/print.survFitCstExp.R
+++ b/R/print.survFitCstExp.R
@@ -1,52 +1,37 @@
-#' Print of \code{survFit} object
-#' 
-#' This is the generic \code{print} S3 method for the \code{survFitCstExp} class.
-#' It prints the underlying JAGS model and some information on the Bayesian 
-#' inference procedure.
-#' 
-#' @param x An object of class \code{survFitCstExp}
-#' @param \dots Further arguments to be passed to generic methods.
-#' 
-#' @return print the model text and the Jags Computing information
-#' 
-#' @examples
-#' # (1) Load the data
-#' data(propiconazole)
-#' 
-#' # (2) Create an object of class 'survData'
-#' dat <- survData(propiconazole)
-#' 
-#' \donttest{
-#' # (3) Run the survFit function with TKTD model 'SD' or 'IT' 
-#' out <- survFit(dat, quiet = TRUE, model_type="SD")
-#' 
-#' # (4) Print the survFit object
-#' print(out)
-#' }
-#' 
-#' @keywords print
-#' 
-#' @export
-print.survFitCstExp <- function(x, ...) {
-  # print the model text and the Jags Computing information
-  # for an object of class survFit
-  
-  summary_mcmc <- summary(x$mcmc)
-  
-  n.chains <- summary_mcmc$nchain
-  n.thin <- summary_mcmc$thin
-  end <- summary_mcmc$end
-  start <- summary_mcmc$start
-
-  
-  # M.C.M.C. informations
-  cat("Model:\n")
-  print(x$model)
-  cat("\nComputing information:\n\n")
-  cat("\n", "Iterations = ", start, ":",
-      end, "\n", sep = "")
-  cat("Thinning interval =", n.thin, "\n")
-  cat("Number of chains =", n.chains, "\n")
-  cat("Sample size per chain =",
-      (end - start) / n.thin + 1, "\n")
-}
+#' Print of \code{survFit} object
+#' 
+#' This is the generic \code{print} S3 method for the \code{survFitCstExp} class.
+#' It prints the underlying JAGS model and some information on the Bayesian 
+#' inference procedure.
+#' 
+#' @param x An object of class \code{survFitCstExp}
+#' @param \dots Further arguments to be passed to generic methods.
+#' 
+#' @return prints the model text and the JAGS computing information
+#' 
+#' @keywords print
+#' 
+#' @export
+print.survFitCstExp <- function(x, ...) {
+  # print the model text and the JAGS computing information
+  # for an object of class survFitCstExp
+  
+  summary_mcmc <- summary(x$mcmc)
+  
+  n.chains <- summary_mcmc$nchain
+  n.thin <- summary_mcmc$thin
+  end <- summary_mcmc$end
+  start <- summary_mcmc$start
+
+  
+  # M.C.M.C. information
+  cat("Model:\n")
+  print(x$model)
+  cat("\nComputing information:\n\n")
+  cat("\n", "Iterations = ", start, ":",
+      end, "\n", sep = "")
+  cat("Thinning interval =", n.thin, "\n")
+  cat("Number of chains =", n.chains, "\n")
+  cat("Sample size per chain =",
+      (end - start) / n.thin + 1, "\n")
+}
diff --git a/R/print.survFitTKTD.R b/R/print.survFitTKTD.R
index b17b1710811d79a9aa93516ca1c700f639d3eee6..183973bd957afc284d31fb28952b830dc0ae288b 100644
--- a/R/print.survFitTKTD.R
+++ b/R/print.survFitTKTD.R
@@ -1,44 +1,29 @@
-#' Print of \code{survFitTKTD} object
-#' 
-#' This is the generic \code{print} S3 method for the \code{survFitTKTD} class.
-#' It prints the underlying JAGS model and some information on the Bayesian 
-#' inference procedure.
-#' 
-#' @param x An object of class \code{survFitTKTD}
-#' @param \dots Further arguments to be passed to generic methods.
-#' 
-#' @return print the model text and the Jags Computing information
-#' 
-#' @examples
-#' # (1) Load the data
-#' data(propiconazole)
-#' 
-#' # (2) Create an object of class 'survData'
-#' dat <- survData(propiconazole)
-#' 
-#' \donttest{
-#' # (3) Run the survFitTKTD function
-#' out <- survFitTKTD(dat, quiet = TRUE)
-#' 
-#' # (4) Print the survFitTKTD object
-#' print(out)
-#' }
-#' 
-#' @keywords print
-#' 
-#' @export
-print.survFitTKTD <- function(x, ...) {
-  # print the model text and the Jags Computing information
-  # for an object of class survFitTKTD
-  
-  # M.C.M.C. informations
-  cat("Model:\n")
-  print(x$model)
-  cat("\nComputing information:\n\n")
-  cat("\n", "Iterations = ", x$n.iter[["start"]], ":",
-      x$n.iter[["end"]], "\n", sep = "")
-  cat("Thinning interval =", x$n.thin, "\n")
-  cat("Number of chains =", x$n.chains, "\n")
-  cat("Sample size per chain =",
-      (x$n.iter[["end"]] - x$n.iter[["start"]]) / x$n.thin + 1, "\n")
-}
+#' Print of \code{survFitTKTD} object
+#' 
+#' This is the generic \code{print} S3 method for the \code{survFitTKTD} class.
+#' It prints the underlying JAGS model and some information on the Bayesian 
+#' inference procedure.
+#' 
+#' @param x An object of class \code{survFitTKTD}
+#' @param \dots Further arguments to be passed to generic methods.
+#' 
+#' @return prints the model text and the JAGS computing information
+#' 
+#' @keywords print
+#' 
+#' @export
+print.survFitTKTD <- function(x, ...) {
+  # print the model text and the JAGS computing information
+  # for an object of class survFitTKTD
+  
+  # M.C.M.C. information
+  cat("Model:\n")
+  print(x$model)
+  cat("\nComputing information:\n\n")
+  cat("\n", "Iterations = ", x$n.iter[["start"]], ":",
+      x$n.iter[["end"]], "\n", sep = "")
+  cat("Thinning interval =", x$n.thin, "\n")
+  cat("Number of chains =", x$n.chains, "\n")
+  cat("Sample size per chain =",
+      (x$n.iter[["end"]] - x$n.iter[["start"]]) / x$n.thin + 1, "\n")
+}
diff --git a/R/print.survFitTT.R b/R/print.survFitTT.R
index 7ec61f711fcf269e3460b18b8f7a779d29904697..8253c1d9bd06f85ef625d438a9615a9dadb33830 100644
--- a/R/print.survFitTT.R
+++ b/R/print.survFitTT.R
@@ -1,46 +1,29 @@
-#' Print of \code{survFitTT} object
-#' 
-#' This is the generic \code{print} S3 method for the \code{survFitTT} class.
-#' It prints the underlying JAGS model and some information on the Bayesian 
-#' inference procedure.
-#' 
-#' @param x An object of class \code{survFitTT}
-#' @param \dots Further arguments to be passed to generic methods
-#' 
-#' @return print the model text and the Jags Computing information
-#' 
-#' @examples
-#' # (1) Load the data
-#' data(cadmium1)
-#' 
-#' # (2) Create an object of class 'survData'
-#' cadmium1 <- survData(cadmium1)
-#' 
-#' \donttest{
-#' # (3) Run the survFitTT function with the log-logistic
-#' # binomial model
-#' out <- survFitTT(cadmium1, lcx = c(5, 10, 15, 20, 30, 50, 80),
-#'                  quiet = TRUE)
-#' 
-#' # (4) Print the survFitTT object
-#' print(out)
-#' }
-#' 
-#' @keywords print
-#' 
-#' @export
-print.survFitTT <- function(x, ...) {
-  # print the model text and the Jags Computing information
-  # for an object of class survFitTT
-  
-  # M.C.M.C. informations
-  cat("Model:\n")
-  print(x$model)
-  cat("\nComputing information:\n\n")
-  cat("\n", "Iterations = ", x$n.iter[["start"]], ":",
-      x$n.iter[["end"]], "\n", sep = "")
-  cat("Thinning interval =", x$n.thin, "\n")
-  cat("Number of chains =", x$n.chains, "\n")
-  cat("Sample size per chain =",
-      (x$n.iter[["end"]] - x$n.iter[["start"]]) / x$n.thin + 1, "\n")
-}
+#' Print of \code{survFitTT} object
+#' 
+#' This is the generic \code{print} S3 method for the \code{survFitTT} class.
+#' It prints the underlying JAGS model and some information on the Bayesian 
+#' inference procedure.
+#' 
+#' @param x An object of class \code{survFitTT}
+#' @param \dots Further arguments to be passed to generic methods
+#' 
+#' @return prints the model text and the JAGS computing information
+#' 
+#' @keywords print
+#' 
+#' @export
+print.survFitTT <- function(x, ...) {
+  # print the model text and the JAGS computing information
+  # for an object of class survFitTT
+  
+  # M.C.M.C. information
+  cat("Model:\n")
+  print(x$model)
+  cat("\nComputing information:\n\n")
+  cat("\n", "Iterations = ", x$n.iter[["start"]], ":",
+      x$n.iter[["end"]], "\n", sep = "")
+  cat("Thinning interval =", x$n.thin, "\n")
+  cat("Number of chains =", x$n.chains, "\n")
+  cat("Sample size per chain =",
+      (x$n.iter[["end"]] - x$n.iter[["start"]]) / x$n.thin + 1, "\n")
+}
diff --git a/R/print.survFitVarExp.R b/R/print.survFitVarExp.R
index 9ed8b9588ae3431e2324abc671f29a1fd69fe9c8..fecdfd558c5adfd8bd817f2c44d1c350ed78fe76 100644
--- a/R/print.survFitVarExp.R
+++ b/R/print.survFitVarExp.R
@@ -1,47 +1,32 @@
-#' Print of \code{survFitVarExp} object
-#'
-#' This is the generic \code{print} S3 method for the \code{survFitVarExp} class.
-#' It prints the underlying JAGS model and some information on the Bayesian
-#' inference procedure.
-#'
-#' @param x An object of class \code{survFitVarExp}
-#' @param \dots Further arguments to be passed to generic methods.
-#'
-#' @keywords print
-#' 
-#' @return print the model text and the Jags Computing information
-#' 
-#' @examples
-#' # (1) Load the data
-#' data(propiconazole_pulse_exposure)
-#' 
-#' # (2) Create a survData object
-#' dataset <- survData(propiconazole_pulse_exposure)
-#' 
-#' \donttest{
-#' # (3) Run the survFit function with TKTD model 'SD' or 'IT' 
-#' out <- survFit(dataset, model_type="SD")
-#' 
-#' # (4) Print the survFit object
-#' print(out)
-#' }
-#'
-#' @export
-print.survFitVarExp <- function(x, ...) {
-  # print the model text and the Jags Computing information
-  # for an object of class survFitTKTD
-  
-  mcmcInfo = x$mcmcInfo
-  
-  # M.C.M.C. informations
-  nbr.thin = mcmcInfo$nbr.thin
-  mcmc_info =
-  cat("Model:\n")
-  print(x$model)
-  cat("\nComputing information:\n\n")
-  cat("Number of iterations per chain = ", mcmcInfo$n.iter, "\n")
-  cat("Thinning interval =", mcmcInfo$thin.interval, "\n")
-  cat("Number of chains =", mcmcInfo$n.chains, "\n")
-  cat("Number iterations in warmup per chain =", mcmcInfo$n.warmup, "\n")
-  cat("Sample size per chain =", mcmcInfo$n.iter / mcmcInfo$thin.interval , "\n")
+#' Print of \code{survFitVarExp} object
+#'
+#' This is the generic \code{print} S3 method for the \code{survFitVarExp} class.
+#' It prints the underlying JAGS model and some information on the Bayesian
+#' inference procedure.
+#'
+#' @param x An object of class \code{survFitVarExp}
+#' @param \dots Further arguments to be passed to generic methods.
+#'
+#' @keywords print
+#' 
+#' @return prints the model text and the JAGS computing information
+#'
+#' @export
+print.survFitVarExp <- function(x, ...) {
+  # print the model text and the JAGS computing information
+  # for an object of class survFitVarExp
+  
+  mcmcInfo = x$mcmcInfo
+  
+  # M.C.M.C. information
+  cat("Model:\n")
+  print(x$model)
+  cat("\nComputing information:\n\n")
+  cat("Number of iterations per chain = ", mcmcInfo$n.iter, "\n")
+  cat("Thinning interval =", mcmcInfo$thin.interval, "\n")
+  cat("Number of chains =", mcmcInfo$n.chains, "\n")
+  cat("Number iterations in warmup per chain =", mcmcInfo$n.warmup, "\n")
+  cat("Sample size per chain =", mcmcInfo$n.iter / mcmcInfo$thin.interval , "\n")
 }
\ No newline at end of file
diff --git a/R/reproData.R b/R/reproData.R
index 4f68f42a2d9863d2b45bd82d4332f896e2c804e1..f08034cb094d0fde2ee0aae35018d93e58a77ece 100644
--- a/R/reproData.R
+++ b/R/reproData.R
@@ -26,15 +26,6 @@
 #'
 #' @keywords transformation
 #'
-#' @examples
-#'
-#' # (1) Load reproduction dataset
-#' data(cadmium1)
-#'
-#' # (2) Create an object of class "reproData"
-#' dat <- reproData(cadmium1)
-#' class(dat)
-#'
 #' @export
 reproData <- function(x) {
 
diff --git a/R/reproDataCheck.R b/R/reproDataCheck.R
index 4d04a10cec9967363912ed368bd27e804cc934c0..5357041aa6fdd6df6555ff627ae897f8fab823a6 100644
--- a/R/reproDataCheck.R
+++ b/R/reproDataCheck.R
@@ -35,18 +35,6 @@
 #'
 #' @keywords check
 #'
-#' @examples
-#'
-#' # Run the check data function
-#' data(copper)
-#' reproDataCheck(copper)
-#'
-#' # Now we insert an error in the data set, by setting a non-zero number of
-#' # offspring at some time, although there is no surviving individual in the
-#' # replicate from the previous time point.
-#' copper[148, "Nrepro"] <- as.integer(1)
-#' reproDataCheck(copper)
-#'
 #' @export
 reproDataCheck <- function(data, diagnosis.plot = TRUE) {
 
diff --git a/R/reproFitTT.R b/R/reproFitTT.R
index 63c1a691ed563b7a94cf0d83cbe4c8cf0afc9ca1..3e9d7feea9b11e4c724dcd98af7478f025f8bfa4 100644
--- a/R/reproFitTT.R
+++ b/R/reproFitTT.R
@@ -1,465 +1,451 @@
-#' Fits a Bayesian concentration-effect model for target-time reproduction analysis
-#'
-#' This function estimates the parameters of a concentration-effect model for
-#' target-time reproduction analysis using Bayesian inference.
-#' In this model the endpoint is the cumulated number of reproduction outputs over
-#' time, with potential mortality all along the experiment.
-#'
-#' Because some individuals may die during the observation period, the
-#' reproduction rate alone is not sufficient to account for the observed number
-#' of offspring at a given time point. In addition, we need the time individuals have stayed alive
-#' during this observation period. The \code{reproFitTT} function estimates the number
-#' of individual-days in an experiment between its start and the target time.
-#' This covariable is then used to estimate a relation between the chemical compound
-#' concentration and the reproduction rate \emph{per individual-day}.
-#'
-#' The \code{reproFitTT} function fits two models, one where inter-individual
-#' variability is neglected ("Poisson" model) and one where it is taken into
-#' account ("gamma-Poisson" model). When setting \code{stoc.part} to
-#' \code{"bestfit"}, a model comparison procedure is used to choose between
-#' both. More details are presented in the vignette accompanying the package.
-#'
-#' @param data an object of class \code{reproData}
-#' @param stoc.part stochastic part of the model. Possible values are \code{"bestfit"},
-#' \code{"poisson"} and \code{"gammapoisson"}
-#' @param target.time defines the target time point at which to analyse the repro data. By default the last time point
-#' @param ecx desired values of \eqn{x} (in percent) for which to compute
-#' \eqn{EC_{x}}{ECx}
-#' @param n.chains number of MCMC chains. The minimum required number of chains is 2
-#' @param quiet if \code{TRUE}, does not print messages and progress bars from JAGS
-#'
-#'
-#' @return The function returns an object of class \code{reproFitTT} which is a list
-#' of the following objects:
-#' \item{DIC}{DIC value of the selected model}
-#' \item{estim.ECx}{a table of the estimated 5, 10, 20 and 50 \% effective
-#' concentrations (by default) and their 95 \% credible intervals}
-#' \item{estim.par}{a table of the estimated parameters as medians and 95 \%
-#' credible intervals}
-#' \item{mcmc}{an object of class \code{mcmc.list} with the posterior distribution}
-#' \item{model}{a JAGS model object}
-#' \item{warnings}{a data.frame with warning messages}
-#' \item{model.label}{a character string, \code{"P"} if the Poisson model is used,
-#' \code{"GP"} if the gamma-Poisson is used}
-#' \item{parameters}{a list of the parameter names used in the model}
-#' \item{n.chains}{an integer value corresponding to the number of chains used
-#' for the MCMC computation}
-#' \item{n.iter}{a list of two indices indicating the beginning and
-#' the end of monitored iterations}
-#' \item{n.thin}{a numerical value corresponding to the thinning interval}
-#' \item{jags.data}{a list of the data passed to the jags model}
-#' \item{transformed.data}{the \code{survData} object passed to the function}
-#' \item{dataTT}{the dataset with which the parameters are estimated}
-#'
-#' @keywords estimation
-#'
-#' @examples
-#'
-#' # (1) Load the data
-#' data(cadmium1)
-#'
-#' # (2) Create an object of class "reproData"
-#' dataset <- reproData(cadmium1)
-#'
-#' \donttest{
-#' # (3) Run the reproFitTT function with the log-logistic gamma-Poisson model
-#' out <- reproFitTT(dataset, stoc.part = "gammapoisson",
-#'                   ecx = c(5, 10, 15, 20, 30, 50, 80), quiet = TRUE)
-#' }
-#'
-#' @import rjags
-#'
-#' @export
-reproFitTT <- function(data,
-                       stoc.part = "bestfit",
-                       target.time = NULL,
-                       ecx = c(5, 10, 20, 50),
-                       n.chains = 3,
-                       quiet = FALSE) {
-  # test class object
-  if (! is(data, "reproData"))
-    stop("reproFitTT: object of class reproData expected")
-
-  # stocastic verification
-  stoc.partpossible <- c("poisson", "gammapoisson", "bestfit")
-
-  if (!any(stoc.partpossible == stoc.part))
-    stop("Invalid value for argument [stoc.part]")
-
-  # check 0 Nreprocumul
-  if (all(data$Nreprocumul == 0))
-    stop("Nreprocumul contains only 0 values !")
-
-  # parameters
-  parameters <- list(poisson = c("d", "log10b", "log10e"),
-                     gammapoisson = c("d", "log10b","log10e", "log10omega"))
-
-  # select Data at target.time
-  dataTT <- selectDataTT(data, target.time)
-
-  # create priors parameters
-  jags.data <- reproCreateJagsData(stoc.part, dataTT)
-
-  # Poisson model only
-  if (stoc.part == "poisson") {
-    # Define model
-    poisson.model <- reproLoadPoissonModel(model.program = llm.poisson.model.text,
-                                           data = jags.data,
-                                           n.chains, quiet)
-
-    # Determine sampling parameters
-    poisson.sampling.parameters <- modelSamplingParameters(poisson.model,
-                                                           parameters$poisson,
-                                                           n.chains, quiet)
-
-    if (poisson.sampling.parameters$niter > 100000)
-      stop("The model needs too many iterations to provide reliable parameter estimates !")
-
-    # calcul DIC
-    poisson.DIC <- calcDIC(poisson.model, poisson.sampling.parameters, quiet)
-
-    # list of objet for the coda.sample function
-    coda.arg <- list(model = poisson.model,
-                     model.label = "P",
-                     niter = poisson.sampling.parameters$niter,
-                     thin = poisson.sampling.parameters$thin,
-                     nburnin = poisson.sampling.parameters$burnin,
-                     parameters = parameters$poisson,
-                     DIC = poisson.DIC)
-  }
-
-  # Gamma-poisson model only
-  if (stoc.part == "gammapoisson") {
-    # Define model
-    gammapoisson.model <- reproLoadGammapoissonModel(model.program = llm.gammapoisson.model.text,
-                                                     data = jags.data,
-                                                     n.chains, quiet)
-
-    # Determine sampling parameters
-    gammapoisson.sampling.parameters <- modelSamplingParameters(gammapoisson.model,
-                                                                parameters$gammapoisson,
-                                                                n.chains, quiet)
-
-    if (gammapoisson.sampling.parameters$niter > 100000)
-      stop("The model needs too many iterations to provide reliable parameter estimates !")
-
-    # calcul DIC
-    gammapoisson.DIC <- calcDIC(gammapoisson.model,
-                                gammapoisson.sampling.parameters, quiet)
-
-    # list of objet for the coda.sample function
-    coda.arg <- list(model = gammapoisson.model,
-                     model.label = "GP",
-                     niter = gammapoisson.sampling.parameters$niter,
-                     thin = gammapoisson.sampling.parameters$thin,
-                     nburnin = gammapoisson.sampling.parameters$burnin,
-                     parameters = parameters$gammapoisson,
-                     DIC = gammapoisson.DIC)
-  }
-
-  # Model Selection by the DIC
-  if (stoc.part == "bestfit") {
-    # Define models
-    poisson.model <- reproLoadPoissonModel(model.program = llm.poisson.model.text,
-                                           data = jags.data,
-                                           n.chains, quiet)
-
-    gammapoisson.model <- reproLoadGammapoissonModel(model.program = llm.gammapoisson.model.text,
-                                                     data = jags.data,
-                                                     n.chains, quiet)
-    # Determine sampling parameters
-    poisson.sampling.parameters <- modelSamplingParameters(poisson.model,
-                                                           parameters$poisson,
-                                                           n.chains, quiet)
-
-    gammapoisson.sampling.parameters <- modelSamplingParameters(gammapoisson.model,
-                                                                parameters$gammapoisson,
-                                                                n.chains, quiet)
-
-    if (poisson.sampling.parameters$niter > 100000 && gammapoisson.sampling.parameters$niter > 100000)
-      stop("The model needs too many iterations to provide reliable parameter estimates !")
-
-    # calcul DIC
-    poisson.DIC <- calcDIC(poisson.model, poisson.sampling.parameters, quiet)
-    gammapoisson.DIC <- calcDIC(gammapoisson.model,
-                                gammapoisson.sampling.parameters, quiet)
-
-    if (gammapoisson.sampling.parameters$niter > 100000) {
-      # list of object for the coda.sample function
-      coda.arg <- list(model = poisson.model,
-                       model.label = "P",
-                       niter = poisson.sampling.parameters$niter,
-                       thin = poisson.sampling.parameters$thin,
-                       nburnin = poisson.sampling.parameters$burnin,
-                       parameters = parameters$poisson,
-                       DIC = poisson.DIC)
-    }
-
-    if (poisson.sampling.parameters$niter > 100000) {
-      # list of object for the coda.sample function
-      coda.arg <- list(model = gammapoisson.model,
-                       model.label = "GP",
-                       niter = gammapoisson.sampling.parameters$niter,
-                       thin = gammapoisson.sampling.parameters$thin,
-                       nburnin = gammapoisson.sampling.parameters$burnin,
-                       parameters = parameters$gammapoisson,
-                       DIC = gammapoisson.DIC)
-    }
-    if (poisson.sampling.parameters$niter <= 100000 && gammapoisson.sampling.parameters$niter <= 100000) {
-      if (poisson.DIC <= (gammapoisson.DIC + 10)) {
-        # list of objet for the coda.sample function
-        coda.arg <- list(model = poisson.model,
-                         model.label = "P",
-                         niter = poisson.sampling.parameters$niter,
-                         thin = poisson.sampling.parameters$thin,
-                         nburnin = poisson.sampling.parameters$burnin,
-                         parameters = parameters$poisson,
-                         DIC = poisson.DIC)
-      } else {
-        # list of objet for the coda.sample function
-        coda.arg <- list(model = gammapoisson.model,
-                         model.label = "GP",
-                         niter = gammapoisson.sampling.parameters$niter,
-                         thin = gammapoisson.sampling.parameters$thin,
-                         nburnin = gammapoisson.sampling.parameters$burnin,
-                         parameters = parameters$gammapoisson,
-                         DIC = gammapoisson.DIC)
-      }
-    }
-  }
-
-  # Sampling
-  prog.b <- ifelse(quiet == TRUE, "none", "text")
-  mcmc <- coda.samples(coda.arg$model,
-                       coda.arg$parameters,
-                       n.iter = coda.arg$niter,
-                       thin = coda.arg$thin,
-                       progress.bar = prog.b)
-
-  # summarize estime.par et CIs
-  # calculate from the estimated parameters
-  estim.par <- reproPARAMS(mcmc, coda.arg$model.label)
-
-  # ECx calculation  estimated ECx and their CIs 95%
-  # vector of ECX
-  estim.ECx <- estimXCX(mcmc, ecx, "EC")
-
-  # check if the maximum measured concentration is in the EC50's range of
-  # 95% percentile
-
-  warnings <- msgTableCreate()
-
-  EC50 <- log10(estim.par["e", "median"])
-  if (!(min(log10(data$conc)) < EC50 & EC50 < max(log10(data$conc)))){
-    ##store warning in warnings table
-    msg <- "The EC50 estimation (model parameter e) lies outside the range of
-    tested concentration and may be unreliable as the prior distribution on
-    this parameter is defined from this range !"
-    warnings <- msgTableAdd(warnings, "EC50outRange", msg)
-    ## print the message
-    warning(msg, call. = FALSE)
-  }
-
-
-  # output
-  OUT <- list(DIC = coda.arg$DIC,
-              estim.ECx = estim.ECx,
-              estim.par = estim.par,
-              det.part = "loglogistic",
-              mcmc = mcmc,
-              warnings = warnings,
-              model = coda.arg$model,
-              model.label = coda.arg$model.label,
-              parameters = coda.arg$parameters,
-              n.chains = summary(mcmc)$nchain,
-              n.iter = list(start = summary(mcmc)$start,
-                            end = summary(mcmc)$end),
-              n.thin = summary(mcmc)$thin,
-              jags.data = jags.data,
-              transformed.data = data,
-              dataTT = dataTT)
-
-  class(OUT) <- "reproFitTT"
-  return(OUT)
-}
-
-
-#' @importFrom stats sd
-reproCreateJagsData <- function(stoc.part, data) {
-  # create the parameters to define the prior of the log-logistic model
-  # for reproduction data analysis
-  # INPUTS
-  # stoc.part: model name
-  # data: object of class reproData
-  # OUTPUT
-  # jags.data : list data require for the jags.model function
-
-
-  # separate control data to the other
-  # tab0: data at conc = 0
-  tab0 <- data[data$conc == min(data$conc), ]
-  # tab: data at conc != 0
-  tab <- data[data$conc != min(data$conc), ]
-
-  Nindtime <- tab$Nindtime
-  NreprocumulIndtime0 <- tab0$Nreprocumul / tab0$Nindtime # cumulated number of
-  # offspring / number of
-  # individual-days
-  conc <- tab$conc
-  Ncumul <- tab$Nreprocumul
-  n <- nrow(tab) # number of observation != from the control
-
-  # Parameter calculation of concentration min and max
-  concmin <- min(sort(unique(conc))[-1])
-  concmax <- max(conc)
-
-  # create priors parameters for the log logistic model
-
-  # Params to define log10e
-  meanlog10e <- (log10(concmin) + log10(concmax)) / 2
-  sdlog10e <- (log10(concmax) - log10(concmin)) / 4
-  taulog10e <- 1 / sdlog10e^2
-
-  # Params to define d
-  meand <- mean(NreprocumulIndtime0)
-  SEd <- sd(NreprocumulIndtime0) / sqrt(length(unique(tab0$replicate)))
-  taud <- 1 / (SEd)^2
-
-  # Params to define b
-  log10bmin <- -2
-  log10bmax <- 2
-
-  # list of data use by jags
-  jags.data <- list(meanlog10e = meanlog10e,
-                    taulog10e = taulog10e,
-                    meand = meand,
-                    taud = taud,
-                    log10bmin = log10bmin,
-                    log10bmax = log10bmax,
-                    n = n,
-                    xconc = conc,
-                    Nindtime = Nindtime,
-                    Ncumul = Ncumul)
-
-  # Params to define overdispersion rate
-  if (stoc.part == "bestfit" || stoc.part == "gammapoisson") {
-    log10omegamin <- -4
-    log10omegamax <- 4
-
-    # list of data use by jags
-    jags.data <- c(jags.data,
-                   log10omegamin = log10omegamin,
-                   log10omegamax = log10omegamax)
-  }
-  return(jags.data)
-}
-
-reproLoadPoissonModel <- function(model.program,
-                                  data,
-                                  n.chains,
-                                  quiet = quiet) {
-                                    # sub function to load jags poisson model
-                                    reproLoadModel(model.program, F, data, n.chains, quiet = quiet)
-                                  }
-
-reproLoadGammapoissonModel <- function(model.program,
-                                       data,
-                                       n.chains,
-                                       quiet = quiet) {
-                                         # sub function to load jags gamma poisson model
-                                         reproLoadModel(model.program, T, data, n.chains, quiet = quiet)
-}
-
-#' @import rjags
-reproLoadModel <- function(model.program,
-                           lr.bound.keep,
-                           data,
-                           n.chains,
-                           Nadapt = 3000,
-                           quiet = quiet) {
-  # create the JAGS model object and called by reproLoadPoissonModel
-  # and reproLoadGammapoissonModel
-  # INPUTS:
-  # - model.program: character string containing a jags model description
-  # - lr.bound.keep: boolean value to use omega parameter or not
-  # - data: list of data created by reproCreateJagsData
-  # - nchains: Number of chains desired
-  # - Nadapt: length of the adaptation phase
-  # - quiet: silent option
-  # OUTPUT:
-  # - JAGS model
-
-  # delisting of lr.bound because not used in the function
-  if (!lr.bound.keep) {
-    data[c("meanlog10omega", "taulog10omega", "log10omegamin",
-           "log10omegamax")] <- NULL
-    }
-
-    # load model text in a temporary file
-    model.file <- tempfile() # temporary file address
-    fileC <- file(model.file) # open connection
-    writeLines(model.program, fileC) # write text in temporary file
-    close(fileC) # close connection to temporary file
-    # creation of the jags model
-    model <- jags.model(file = model.file, data = data, n.chains = n.chains,
-                        n.adapt = Nadapt, quiet = quiet)
-    unlink(model.file)
-    return(model)
-}
-
-reproPARAMS <- function(mcmc, MODEL = "P") {
-  # create the table of posterior estimated parameters
-  # for the reproduction analyses
-  # INPUT:
-  # - mcmc:  list of estimated parameters for the model with each item representing
-  # a chain
-  # - MODEL: a position flag model with P: poisson model and GP: gammapoisson
-  # model
-  # OUTPUT:
-  # - data frame with 3 columns (values, CIinf, CIsup) and 3-4rows (the estimated
-  # parameters)
-
-  # Retrieving parameters of the model
-  res.M <- summary(mcmc)
-
-  b <- 10^res.M$quantiles["log10b", "50%"]
-  d <- res.M$quantiles["d", "50%"]
-  e <- 10^res.M$quantiles["log10e", "50%"]
-  binf <- 10^res.M$quantiles["log10b", "2.5%"]
-  dinf <- res.M$quantiles["d", "2.5%"]
-  einf <- 10^res.M$quantiles["log10e", "2.5%"]
-  bsup <- 10^res.M$quantiles["log10b", "97.5%"]
-  dsup <- res.M$quantiles["d", "97.5%"]
-  esup <- 10^res.M$quantiles["log10e", "97.5%"]
-
-  # Definition of the parameter storage and storage data
-
-  # If Poisson Model
-  if (MODEL == "P") {
-    rownames <- c("b", "d", "e")
-    params <- c(b, d, e)
-    CIinf <- c(binf, dinf, einf)
-    CIsup <- c(bsup, dsup, esup)
-  }
-  # If Gamma Poisson Model
-  if (MODEL == "GP") {
-    # Calculation of the parameter omega
-    omega <- 10^res.M$quantiles["log10omega", "50%"]
-    omegainf <- 10^res.M$quantiles["log10omega", "2.5%"]
-    omegasup <- 10^res.M$quantiles["log10omega", "97.5%"]
-    # Definition of the parameter storage and storage data
-    rownames <- c("b", "d", "e", "omega")
-    params <- c(b, d, e, omega)
-    CIinf <- c(binf, dinf, einf, omegainf)
-    CIsup <- c(bsup, dsup, esup, omegasup)
-  }
-
-  res <- data.frame(median = params, Q2.5 = CIinf, Q97.5 = CIsup,
-                    row.names = rownames)
-
-  return(res)
-}
-
-llm.poisson.model.text <- "\nmodel # Loglogistic Poisson model\n{\n#\nfor (j in 1:n) # loop on replicates\n{\n# Explicit writting of a Poisson law for each replicate\n# mean is given by the theoretical curve\nytheo[j] <- d / (1 + pow(xconc[j]/e, b))\nnbtheo[j] <- ytheo[j]*Nindtime[j]\nNcumul[j] ~ dpois(nbtheo[j])\n}\n# Prior distributions\nd ~ dnorm(meand, taud)T(0,)\nlog10b ~ dunif(log10bmin, log10bmax)\nlog10e ~ dnorm(meanlog10e, taulog10e)\n\nb <- pow(10,log10b)\ne <- pow(10,log10e)\n}\n"
-
-llm.gammapoisson.model.text <- "\nmodel # Loglogisitc Gamma poisson model\n{\n#\nfor (j in 1:n) # loop on replicates\n{\n# Explicit writting of a gamma-Poisson law for each replicate\n# the mean is given by a gamma law centered on the theoretical curve\nrate[j] <- d / (1 + pow(xconc[j]/e, b)) / omega\np[j] <- 1 / (Nindtime[j] * omega + 1)\nNcumul[j] ~ dnegbin(p[j], rate[j])\n}\n# Prior distributions\nd ~ dnorm(meand, taud)T(0,)\nlog10b ~ dunif(log10bmin, log10bmax)\nlog10e ~ dnorm(meanlog10e, taulog10e)\nlog10omega ~ dunif(log10omegamin, log10omegamax)\n\nomega <- pow(10,log10omega)\nb <- pow(10,log10b)\ne <- pow(10,log10e)\n}\n"
+#' Fits a Bayesian concentration-effect model for target-time reproduction analysis
+#'
+#' This function estimates the parameters of a concentration-effect model for
+#' target-time reproduction analysis using Bayesian inference.
+#' In this model the endpoint is the cumulative number of reproduction outputs over
+#' time, with potential mortality throughout the experiment.
+#'
+#' Because some individuals may die during the observation period, the
+#' reproduction rate alone is not sufficient to account for the observed number
+#' of offspring at a given time point. In addition, we need the time individuals have stayed alive
+#' during this observation period. The \code{reproFitTT} function estimates the number
+#' of individual-days in an experiment between its start and the target time.
+#' This covariable is then used to estimate a relation between the chemical compound
+#' concentration and the reproduction rate \emph{per individual-day}.
+#'
+#' The \code{reproFitTT} function fits two models, one where inter-individual
+#' variability is neglected ("Poisson" model) and one where it is taken into
+#' account ("gamma-Poisson" model). When setting \code{stoc.part} to
+#' \code{"bestfit"}, a model comparison procedure is used to choose between
+#' both. More details are presented in the vignette accompanying the package.
+#'
+#' @param data an object of class \code{reproData}
+#' @param stoc.part stochastic part of the model. Possible values are \code{"bestfit"},
+#' \code{"poisson"} and \code{"gammapoisson"}
+#' @param target.time defines the target time point at which to analyse the repro data. By default, the last time point is used
+#' @param ecx desired values of \eqn{x} (in percent) for which to compute
+#' \eqn{EC_{x}}{ECx}
+#' @param n.chains number of MCMC chains. The minimum required number of chains is 2
+#' @param quiet if \code{TRUE}, does not print messages and progress bars from JAGS
+#'
+#'
+#' @return The function returns an object of class \code{reproFitTT} which is a list
+#' of the following objects:
+#' \item{DIC}{DIC value of the selected model}
+#' \item{estim.ECx}{a table of the estimated 5, 10, 20 and 50 \% effective
+#' concentrations (by default) and their 95 \% credible intervals}
+#' \item{estim.par}{a table of the estimated parameters as medians and 95 \%
+#' credible intervals}
+#' \item{mcmc}{an object of class \code{mcmc.list} with the posterior distribution}
+#' \item{model}{a JAGS model object}
+#' \item{warnings}{a data.frame with warning messages}
+#' \item{model.label}{a character string, \code{"P"} if the Poisson model is used,
+#' \code{"GP"} if the gamma-Poisson is used}
+#' \item{parameters}{a list of the parameter names used in the model}
+#' \item{n.chains}{an integer value corresponding to the number of chains used
+#' for the MCMC computation}
+#' \item{n.iter}{a list of two indices indicating the beginning and
+#' the end of monitored iterations}
+#' \item{n.thin}{a numerical value corresponding to the thinning interval}
+#' \item{jags.data}{a list of the data passed to the jags model}
+#' \item{transformed.data}{the \code{survData} object passed to the function}
+#' \item{dataTT}{the dataset with which the parameters are estimated}
+#'
+#' @keywords estimation
+#'
+#' @import rjags
+#'
+#' @export
+reproFitTT <- function(data,
+                       stoc.part = "bestfit",
+                       target.time = NULL,
+                       ecx = c(5, 10, 20, 50),
+                       n.chains = 3,
+                       quiet = FALSE) {
+  # test class object
+  if (!is(data, "reproData"))
+    stop("reproFitTT: object of class reproData expected")
+
+  # check the stochastic part argument
+  stoc.partpossible <- c("poisson", "gammapoisson", "bestfit")
+
+  if (!any(stoc.partpossible == stoc.part))
+    stop("Invalid value for argument [stoc.part]")
+
+  # check 0 Nreprocumul
+  if (all(data$Nreprocumul == 0))
+    stop("Nreprocumul contains only 0 values !")
+
+  # parameters
+  parameters <- list(poisson = c("d", "log10b", "log10e"),
+                     gammapoisson = c("d", "log10b","log10e", "log10omega"))
+
+  # select Data at target.time
+  dataTT <- selectDataTT(data, target.time)
+
+  # create priors parameters
+  jags.data <- reproCreateJagsData(stoc.part, dataTT)
+
+  # Poisson model only
+  if (stoc.part == "poisson") {
+    # Define model
+    poisson.model <- reproLoadPoissonModel(model.program = llm.poisson.model.text,
+                                           data = jags.data,
+                                           n.chains, quiet)
+
+    # Determine sampling parameters
+    poisson.sampling.parameters <- modelSamplingParameters(poisson.model,
+                                                           parameters$poisson,
+                                                           n.chains, quiet)
+
+    if (poisson.sampling.parameters$niter > 100000)
+      stop("The model needs too many iterations to provide reliable parameter estimates !")
+
+    # compute the DIC
+    poisson.DIC <- calcDIC(poisson.model, poisson.sampling.parameters, quiet)
+
+    # list of objects for the coda.samples function
+    coda.arg <- list(model = poisson.model,
+                     model.label = "P",
+                     niter = poisson.sampling.parameters$niter,
+                     thin = poisson.sampling.parameters$thin,
+                     nburnin = poisson.sampling.parameters$burnin,
+                     parameters = parameters$poisson,
+                     DIC = poisson.DIC)
+  }
+
+  # Gamma-poisson model only
+  if (stoc.part == "gammapoisson") {
+    # Define model
+    gammapoisson.model <- reproLoadGammapoissonModel(model.program = llm.gammapoisson.model.text,
+                                                     data = jags.data,
+                                                     n.chains, quiet)
+
+    # Determine sampling parameters
+    gammapoisson.sampling.parameters <- modelSamplingParameters(gammapoisson.model,
+                                                                parameters$gammapoisson,
+                                                                n.chains, quiet)
+
+    if (gammapoisson.sampling.parameters$niter > 100000)
+      stop("The model needs too many iterations to provide reliable parameter estimates !")
+
+    # compute the DIC
+    gammapoisson.DIC <- calcDIC(gammapoisson.model,
+                                gammapoisson.sampling.parameters, quiet)
+
+    # list of objects for the coda.samples function
+    coda.arg <- list(model = gammapoisson.model,
+                     model.label = "GP",
+                     niter = gammapoisson.sampling.parameters$niter,
+                     thin = gammapoisson.sampling.parameters$thin,
+                     nburnin = gammapoisson.sampling.parameters$burnin,
+                     parameters = parameters$gammapoisson,
+                     DIC = gammapoisson.DIC)
+  }
+
+  # Model Selection by the DIC
+  if (stoc.part == "bestfit") {
+    # Define models
+    poisson.model <- reproLoadPoissonModel(model.program = llm.poisson.model.text,
+                                           data = jags.data,
+                                           n.chains, quiet)
+
+    gammapoisson.model <- reproLoadGammapoissonModel(model.program = llm.gammapoisson.model.text,
+                                                     data = jags.data,
+                                                     n.chains, quiet)
+    # Determine sampling parameters
+    poisson.sampling.parameters <- modelSamplingParameters(poisson.model,
+                                                           parameters$poisson,
+                                                           n.chains, quiet)
+
+    gammapoisson.sampling.parameters <- modelSamplingParameters(gammapoisson.model,
+                                                                parameters$gammapoisson,
+                                                                n.chains, quiet)
+
+    if (poisson.sampling.parameters$niter > 100000 && gammapoisson.sampling.parameters$niter > 100000)
+      stop("The model needs too many iterations to provide reliable parameter estimates !")
+
+    # compute the DIC
+    poisson.DIC <- calcDIC(poisson.model, poisson.sampling.parameters, quiet)
+    gammapoisson.DIC <- calcDIC(gammapoisson.model,
+                                gammapoisson.sampling.parameters, quiet)
+
+    if (gammapoisson.sampling.parameters$niter > 100000) {
+      # gamma-Poisson needs too many iterations: fall back to the Poisson model
+      coda.arg <- list(model = poisson.model,
+                       model.label = "P",
+                       niter = poisson.sampling.parameters$niter,
+                       thin = poisson.sampling.parameters$thin,
+                       nburnin = poisson.sampling.parameters$burnin,
+                       parameters = parameters$poisson,
+                       DIC = poisson.DIC)
+    }
+
+    if (poisson.sampling.parameters$niter > 100000) {
+      # Poisson needs too many iterations: fall back to the gamma-Poisson model
+      coda.arg <- list(model = gammapoisson.model,
+                       model.label = "GP",
+                       niter = gammapoisson.sampling.parameters$niter,
+                       thin = gammapoisson.sampling.parameters$thin,
+                       nburnin = gammapoisson.sampling.parameters$burnin,
+                       parameters = parameters$gammapoisson,
+                       DIC = gammapoisson.DIC)
+    }
+    if (poisson.sampling.parameters$niter <= 100000 && gammapoisson.sampling.parameters$niter <= 100000) {
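+      # rule of thumb: keep the simpler Poisson model unless the gamma-Poisson
+      # model improves (lowers) the DIC by more than 10 units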
+      if (poisson.DIC <= (gammapoisson.DIC + 10)) {
+        # list of objects for the coda.samples function
+        coda.arg <- list(model = poisson.model,
+                         model.label = "P",
+                         niter = poisson.sampling.parameters$niter,
+                         thin = poisson.sampling.parameters$thin,
+                         nburnin = poisson.sampling.parameters$burnin,
+                         parameters = parameters$poisson,
+                         DIC = poisson.DIC)
+      } else {
+        # list of objects for the coda.samples function
+        coda.arg <- list(model = gammapoisson.model,
+                         model.label = "GP",
+                         niter = gammapoisson.sampling.parameters$niter,
+                         thin = gammapoisson.sampling.parameters$thin,
+                         nburnin = gammapoisson.sampling.parameters$burnin,
+                         parameters = parameters$gammapoisson,
+                         DIC = gammapoisson.DIC)
+      }
+    }
+  }
+
+  # Sampling
+  prog.b <- ifelse(quiet, "none", "text")
+  mcmc <- coda.samples(coda.arg$model,
+                       coda.arg$parameters,
+                       n.iter = coda.arg$niter,
+                       thin = coda.arg$thin,
+                       progress.bar = prog.b)
+
+  # summarize the estimated parameters and their 95% CIs
+  estim.par <- reproPARAMS(mcmc, coda.arg$model.label)
+
+  # compute the estimated ECx and their 95% CIs
+  estim.ECx <- estimXCX(mcmc, ecx, "EC")
+
+  # check whether the estimated EC50 (median of parameter e) lies within
+  # the range of tested concentrations
+
+  warnings <- msgTableCreate()
+
+  EC50 <- log10(estim.par["e", "median"])
+  if (!(min(log10(data$conc)) < EC50 & EC50 < max(log10(data$conc)))){
+    ## store the warning in the warnings table
+    msg <- "The EC50 estimation (model parameter e) lies outside the range of
+    tested concentration and may be unreliable as the prior distribution on
+    this parameter is defined from this range !"
+    warnings <- msgTableAdd(warnings, "EC50outRange", msg)
+    ## print the message
+    warning(msg, call. = FALSE)
+  }
+
+  # output
+  OUT <- list(DIC = coda.arg$DIC,
+              estim.ECx = estim.ECx,
+              estim.par = estim.par,
+              det.part = "loglogistic",
+              mcmc = mcmc,
+              warnings = warnings,
+              model = coda.arg$model,
+              model.label = coda.arg$model.label,
+              parameters = coda.arg$parameters,
+              n.chains = summary(mcmc)$nchain,
+              n.iter = list(start = summary(mcmc)$start,
+                            end = summary(mcmc)$end),
+              n.thin = summary(mcmc)$thin,
+              jags.data = jags.data,
+              transformed.data = data,
+              dataTT = dataTT)
+
+  class(OUT) <- "reproFitTT"
+  return(OUT)
+}
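+
+# A sketch of typical use of the returned "reproFitTT" object (not run),
+# assuming `fit` holds the result of the fitting function above:
+#   fit$estim.par  # posterior medians and 95% CIs of b, d, e (and omega)
+#   fit$estim.ECx  # ECx estimates and their 95% CIs
+#   fit$DIC        # DIC of the retained stochastic part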
+
+
+#' @importFrom stats sd
+reproCreateJagsData <- function(stoc.part, data) {
+  # create the parameters to define the prior of the log-logistic model
+  # for reproduction data analysis
+  # INPUTS
+  # stoc.part: model name
+  # data: object of class reproData
+  # OUTPUT
+  # jags.data: list of data required by the jags.model function
+
+
+  # separate the control data from the rest
+  # tab0: data at conc = 0
+  tab0 <- data[data$conc == min(data$conc), ]
+  # tab: data at conc != 0
+  tab <- data[data$conc != min(data$conc), ]
+
+  Nindtime <- tab$Nindtime
+  # cumulated number of offspring / number of individual-days (control)
+  NreprocumulIndtime0 <- tab0$Nreprocumul / tab0$Nindtime
+  conc <- tab$conc
+  Ncumul <- tab$Nreprocumul
+  n <- nrow(tab) # number of observations outside the control
+
+  # compute the min and max of the tested concentrations
+  concmin <- min(sort(unique(conc))[-1])
+  concmax <- max(conc)
+
+  # create prior parameters for the log-logistic model
+
+  # Params to define log10e
+  meanlog10e <- (log10(concmin) + log10(concmax)) / 2
+  sdlog10e <- (log10(concmax) - log10(concmin)) / 4
+  taulog10e <- 1 / sdlog10e^2
+
+  # Params to define d
+  meand <- mean(NreprocumulIndtime0)
+  SEd <- sd(NreprocumulIndtime0) / sqrt(length(unique(tab0$replicate)))
+  taud <- 1 / (SEd)^2
+
+  # Params to define b
+  log10bmin <- -2
+  log10bmax <- 2
+
+  # list of data used by JAGS
+  jags.data <- list(meanlog10e = meanlog10e,
+                    taulog10e = taulog10e,
+                    meand = meand,
+                    taud = taud,
+                    log10bmin = log10bmin,
+                    log10bmax = log10bmax,
+                    n = n,
+                    xconc = conc,
+                    Nindtime = Nindtime,
+                    Ncumul = Ncumul)
+
+  # Params to define overdispersion rate
+  if (stoc.part == "bestfit" || stoc.part == "gammapoisson") {
+    log10omegamin <- -4
+    log10omegamax <- 4
+
+    # list of data used by JAGS
+    jags.data <- c(jags.data,
+                   log10omegamin = log10omegamin,
+                   log10omegamax = log10omegamax)
+  }
+  return(jags.data)
+}
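+
+# A minimal sketch (not run) of the log10e prior construction above, assuming
+# hypothetical bounds concmin = 1 and concmax = 100:
+#   meanlog10e <- (log10(1) + log10(100)) / 2  # = 1, i.e. e centred on 10
+#   sdlog10e   <- (log10(100) - log10(1)) / 4  # = 0.5
+#   taulog10e  <- 1 / 0.5^2                    # = 4 (JAGS dnorm uses precision)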
+
+reproLoadPoissonModel <- function(model.program,
+                                  data,
+                                  n.chains,
+                                  quiet = FALSE) {
+  # sub-function to load the JAGS Poisson model
+  reproLoadModel(model.program, FALSE, data, n.chains, quiet = quiet)
+}
+
+reproLoadGammapoissonModel <- function(model.program,
+                                       data,
+                                       n.chains,
+                                       quiet = FALSE) {
+  # sub-function to load the JAGS gamma-Poisson model
+  reproLoadModel(model.program, TRUE, data, n.chains, quiet = quiet)
+}
+
+#' @import rjags
+reproLoadModel <- function(model.program,
+                           lr.bound.keep,
+                           data,
+                           n.chains,
+                           Nadapt = 3000,
+                           quiet = FALSE) {
+  # create the JAGS model object; called by reproLoadPoissonModel
+  # and reproLoadGammapoissonModel
+  # INPUTS:
+  # - model.program: character string containing a JAGS model description
+  # - lr.bound.keep: boolean, whether to keep the omega (overdispersion) parameter
+  # - data: list of data created by reproCreateJagsData
+  # - n.chains: number of chains desired
+  # - Nadapt: length of the adaptation phase
+  # - quiet: silent option
+  # OUTPUT:
+  # - JAGS model
+
+  # drop the overdispersion-related entries from the data list when the
+  # Poisson model is used, since they do not appear in that model
+  if (!lr.bound.keep) {
+    data[c("meanlog10omega", "taulog10omega", "log10omegamin",
+           "log10omegamax")] <- NULL
+  }
+
+  # write the model text to a temporary file
+  model.file <- tempfile() # temporary file address
+  fileC <- file(model.file) # open connection
+  writeLines(model.program, fileC) # write text to the temporary file
+  close(fileC) # close connection to the temporary file
+  # create the JAGS model
+  model <- jags.model(file = model.file, data = data, n.chains = n.chains,
+                      n.adapt = Nadapt, quiet = quiet)
+  unlink(model.file)
+  return(model)
+}
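+
+# Usage sketch (not run), assuming `jags.data` was built by
+# reproCreateJagsData():
+#   m <- reproLoadPoissonModel(llm.poisson.model.text, jags.data,
+#                              n.chains = 3, quiet = TRUE)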
+
+reproPARAMS <- function(mcmc, MODEL = "P") {
+  # create the table of posterior estimated parameters
+  # for the reproduction analyses
+  # INPUT:
+  # - mcmc:  list of estimated parameters for the model with each item representing
+  # a chain
+  # - MODEL: model flag, "P" for the Poisson model and "GP" for the
+  # gamma-Poisson model
+  # OUTPUT:
+  # - data frame with 3 columns (median, Q2.5, Q97.5) and 3-4 rows (the
+  # estimated parameters)
+
+  # Retrieving parameters of the model
+  res.M <- summary(mcmc)
+
+  b <- 10^res.M$quantiles["log10b", "50%"]
+  d <- res.M$quantiles["d", "50%"]
+  e <- 10^res.M$quantiles["log10e", "50%"]
+  binf <- 10^res.M$quantiles["log10b", "2.5%"]
+  dinf <- res.M$quantiles["d", "2.5%"]
+  einf <- 10^res.M$quantiles["log10e", "2.5%"]
+  bsup <- 10^res.M$quantiles["log10b", "97.5%"]
+  dsup <- res.M$quantiles["d", "97.5%"]
+  esup <- 10^res.M$quantiles["log10e", "97.5%"]
+
+  # assemble the parameter estimates
+
+  # If Poisson Model
+  if (MODEL == "P") {
+    rownames <- c("b", "d", "e")
+    params <- c(b, d, e)
+    CIinf <- c(binf, dinf, einf)
+    CIsup <- c(bsup, dsup, esup)
+  }
+  # If Gamma Poisson Model
+  if (MODEL == "GP") {
+    # Calculation of the parameter omega
+    omega <- 10^res.M$quantiles["log10omega", "50%"]
+    omegainf <- 10^res.M$quantiles["log10omega", "2.5%"]
+    omegasup <- 10^res.M$quantiles["log10omega", "97.5%"]
+    # assemble the parameter estimates
+    rownames <- c("b", "d", "e", "omega")
+    params <- c(b, d, e, omega)
+    CIinf <- c(binf, dinf, einf, omegainf)
+    CIsup <- c(bsup, dsup, esup, omegasup)
+  }
+
+  res <- data.frame(median = params, Q2.5 = CIinf, Q97.5 = CIsup,
+                    row.names = rownames)
+
+  return(res)
+}
+
+llm.poisson.model.text <- "\nmodel # Loglogistic Poisson model\n{\n#\nfor (j in 1:n) # loop on replicates\n{\n# Explicit writing of a Poisson law for each replicate\n# mean is given by the theoretical curve\nytheo[j] <- d / (1 + pow(xconc[j]/e, b))\nnbtheo[j] <- ytheo[j]*Nindtime[j]\nNcumul[j] ~ dpois(nbtheo[j])\n}\n# Prior distributions\nd ~ dnorm(meand, taud)T(0,)\nlog10b ~ dunif(log10bmin, log10bmax)\nlog10e ~ dnorm(meanlog10e, taulog10e)\n\nb <- pow(10,log10b)\ne <- pow(10,log10e)\n}\n"
+
+llm.gammapoisson.model.text <- "\nmodel # Loglogistic Gamma-Poisson model\n{\n#\nfor (j in 1:n) # loop on replicates\n{\n# Explicit writing of a gamma-Poisson law for each replicate\n# the mean is given by a gamma law centered on the theoretical curve\nrate[j] <- d / (1 + pow(xconc[j]/e, b)) / omega\np[j] <- 1 / (Nindtime[j] * omega + 1)\nNcumul[j] ~ dnegbin(p[j], rate[j])\n}\n# Prior distributions\nd ~ dnorm(meand, taud)T(0,)\nlog10b ~ dunif(log10bmin, log10bmax)\nlog10e ~ dnorm(meanlog10e, taulog10e)\nlog10omega ~ dunif(log10omegamin, log10omegamax)\n\nomega <- pow(10,log10omega)\nb <- pow(10,log10b)\ne <- pow(10,log10e)\n}\n"
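+
+# Note on the gamma-Poisson model above: in JAGS, dnegbin(p, r) has mean
+# r * (1 - p) / p. With p[j] = 1 / (Nindtime[j] * omega + 1) and
+# rate[j] = ytheo[j] / omega, the mean is ytheo[j] * Nindtime[j], as in the
+# Poisson model, while the variance is inflated by the overdispersion omega.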
diff --git a/R/summary.reproData.R b/R/summary.reproData.R
index a9f642d9beeff6e1cc941d3747c7d4353ef9ba62..6f91348e81912ffc7dce262c06de61c8f068b9c8 100644
--- a/R/summary.reproData.R
+++ b/R/summary.reproData.R
@@ -12,16 +12,6 @@
 #' \code{\link{summary.survDataCstExp}} plus an additional one:
 #' \item{NboffTimeConc}{nb of offspring for all concentrations and time points}
 #' 
-#' @examples
-#' # (1) Load the data
-#' data(cadmium1)
-#' 
-#' # (2) Create a reproData object
-#' cadmium1 <- reproData(cadmium1)
-#' 
-#' # (3) Summarize the data set
-#' summary(cadmium1)
-#' 
 #' @keywords summary
 #' 
 #' @export
diff --git a/R/summary.reproFitTT.R b/R/summary.reproFitTT.R
index 9b400df958aca42094168fa5ff026075ede8c4d5..b14d3c43ed6a7a9d3c939bd2d2bad766e960afef 100644
--- a/R/summary.reproFitTT.R
+++ b/R/summary.reproFitTT.R
@@ -1,105 +1,88 @@
-#' Summary of \code{reproFitTT} object
-#'
-#' This is the generic \code{summary} S3 method for the \code{reproFitTT} class.
-#' It shows the quantiles of priors and posteriors on parameters
-#' and the quantiles of the posterior on the ECx estimates.
-#'
-#' @param object an object of class \code{reproFitTT}
-#' @param quiet when \code{TRUE}, does not print
-#' @param \dots Further arguments to be passed to generic methods
-#'
-#' @return The function returns a list with the following information:
-#' \item{Qpriors}{quantiles of the model priors}
-#' \item{Qposteriors}{quantiles of the model posteriors}
-#' \item{QECx}{quantiles of ECx estimates}
-#'
-#' @examples
-#' # (1) Load the data
-#' data(cadmium1)
-#'
-#' # (2) Create a reproData object
-#' cadmium1 <- reproData(cadmium1)
-#'
-#' \donttest{
-#' # (3) Run the reproFitTT function with the log-logistic
-#' # model
-#' out <- reproFitTT(cadmium1, ecx = c(5, 10, 15, 20, 30, 50, 80),
-#' quiet = TRUE)
-#'
-#' # (4) summarize the reproFitTT object
-#' summary(out)
-#' }
-#'
-#' @keywords summary
-#' 
-#' @importFrom stats qnorm qunif
-#' 
-#' @export
-summary.reproFitTT <- function(object, quiet = FALSE, ...) {
-
-  # quantiles of priors parameters
-  n.iter <- object$n.iter$end - object$n.iter$start
-
-  # b
-  log10b <- qunif(p = c(0.5, 0.025, 0.975),
-                  min = object$jags.data$log10bmin,
-                  max = object$jags.data$log10bmax)
-
-  b <- 10^log10b
-
-  # d
-  d <- qnorm(p = c(0.5, 0.025, 0.975),
-             mean = object$jags.data$meand,
-             sd = 1 / sqrt(object$jags.data$taud))
-
-  # e
-  log10e <- qnorm(p = c(0.5, 0.025, 0.975),
-                  mean = object$jags.data$meanlog10e,
-                  sd = 1 / sqrt(object$jags.data$taulog10e))
-
-  e <- 10^log10e
-
-  if (object$model.label == "P") {
-    res <- rbind(b, d, e)
-  }
-  if (object$model.label == "GP") {
-    # omega
-    log10omega <- qunif(p = c(0.5, 0.025, 0.975),
-                        min = object$jags.data$log10omegamin,
-                        max = object$jags.data$log10omegamax)
-
-    omega <- 10^log10omega
-
-    res <- rbind(b, d, e, omega)
-  }
-
-  ans1 <-  format(data.frame(res), scientific = TRUE, digits = 4)
-  colnames(ans1) <- c("50%", "2.5%", "97.5%")
-
-  # quantiles of estimated model parameters
-  ans2 <- format(object$estim.par, scientific = TRUE, digits = 4)
-  colnames(ans2) <- c("50%", "2.5%", "97.5%")
-
-  # estimated ECx and their CIs 95%
-  ans3 <- format(object$estim.ECx, scientific = TRUE, digits = 4)
-  colnames(ans3) <- c("50%", "2.5%", "97.5%")
-
-  if (! quiet) {
-    cat("Summary: \n\n")
-    if (object$model.label == "GP")
-      cat("The ", object$det.part, " model with a Gamma Poisson stochastic part was used !\n\n")
-    if(object$model.label == "P")
-      cat("The ", object$det.part, " model with a Poisson stochastic part was used !\n\n")
-    cat("Priors on parameters (quantiles):\n\n")
-    print(ans1)
-    cat("\nPosteriors of the parameters (quantiles):\n\n")
-    print(ans2)
-    cat("\nPosteriors of the ECx (quantiles):\n\n")
-    print(ans3)
-  }
-
-  invisible(list(Qpriors = ans1,
-                 Qposteriors = ans2,
-                 QECx = ans3))
-}
-
+#' Summary of \code{reproFitTT} object
+#'
+#' This is the generic \code{summary} S3 method for the \code{reproFitTT} class.
+#' It shows the quantiles of priors and posteriors on parameters
+#' and the quantiles of the posterior on the ECx estimates.
+#'
+#' @param object an object of class \code{reproFitTT}
+#' @param quiet when \code{TRUE}, does not print
+#' @param \dots Further arguments to be passed to generic methods
+#'
+#' @return The function returns a list with the following information:
+#' \item{Qpriors}{quantiles of the model priors}
+#' \item{Qposteriors}{quantiles of the model posteriors}
+#' \item{QECx}{quantiles of ECx estimates}
+#'
+#' @keywords summary
+#' 
+#' @importFrom stats qnorm qunif
+#' 
+#' @export
+summary.reproFitTT <- function(object, quiet = FALSE, ...) {
+
+  # quantiles of the prior distributions
+  n.iter <- object$n.iter$end - object$n.iter$start
+
+  # b
+  log10b <- qunif(p = c(0.5, 0.025, 0.975),
+                  min = object$jags.data$log10bmin,
+                  max = object$jags.data$log10bmax)
+
+  b <- 10^log10b
+
+  # d
+  d <- qnorm(p = c(0.5, 0.025, 0.975),
+             mean = object$jags.data$meand,
+             sd = 1 / sqrt(object$jags.data$taud))
+
+  # e
+  log10e <- qnorm(p = c(0.5, 0.025, 0.975),
+                  mean = object$jags.data$meanlog10e,
+                  sd = 1 / sqrt(object$jags.data$taulog10e))
+
+  e <- 10^log10e
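+
+  # NB: quantiles are invariant under monotone transformations, so taking
+  # 10^ of the log10-scale quantiles yields exact prior quantiles of b and e
+  # (and omega below) on the natural scale.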
+
+  if (object$model.label == "P") {
+    res <- rbind(b, d, e)
+  }
+  if (object$model.label == "GP") {
+    # omega
+    log10omega <- qunif(p = c(0.5, 0.025, 0.975),
+                        min = object$jags.data$log10omegamin,
+                        max = object$jags.data$log10omegamax)
+
+    omega <- 10^log10omega
+
+    res <- rbind(b, d, e, omega)
+  }
+
+  ans1 <-  format(data.frame(res), scientific = TRUE, digits = 4)
+  colnames(ans1) <- c("50%", "2.5%", "97.5%")
+
+  # quantiles of estimated model parameters
+  ans2 <- format(object$estim.par, scientific = TRUE, digits = 4)
+  colnames(ans2) <- c("50%", "2.5%", "97.5%")
+
+  # estimated ECx and their 95% CIs
+  ans3 <- format(object$estim.ECx, scientific = TRUE, digits = 4)
+  colnames(ans3) <- c("50%", "2.5%", "97.5%")
+
+  if (! quiet) {
+    cat("Summary: \n\n")
+    if (object$model.label == "GP")
+      cat("The ", object$det.part, " model with a Gamma Poisson stochastic part was used !\n\n")
+    if(object$model.label == "P")
+      cat("The ", object$det.part, " model with a Poisson stochastic part was used !\n\n")
+    cat("Priors on parameters (quantiles):\n\n")
+    print(ans1)
+    cat("\nPosteriors of the parameters (quantiles):\n\n")
+    print(ans2)
+    cat("\nPosteriors of the ECx (quantiles):\n\n")
+    print(ans3)
+  }
+
+  invisible(list(Qpriors = ans1,
+                 Qposteriors = ans2,
+                 QECx = ans3))
+}
+
diff --git a/R/summary.survDataCstExp.R b/R/summary.survDataCstExp.R
index 19c6354834c023fbf7e57fb1af1c161fb8302381..25ad4826de0baa1635e7275dcedc4e06c5ef500f 100644
--- a/R/summary.survDataCstExp.R
+++ b/R/summary.survDataCstExp.R
@@ -11,16 +11,6 @@
 #' \item{NbrepTimeConc}{nb of replicates for all concentrations and time points}
 #' \item{NbsurvTimeConc}{nb of survivors. for all concentrations and time points}
 #' 
-#' @examples
-#' # (1) Load the data
-#' data(cadmium1)
-#' 
-#' # (2) Create a survDataCstExp object
-#' dat <- survData(cadmium1)
-#' 
-#' # (3) Summarize the data set
-#' summary(dat)
-#' 
 #' @keywords summary
 #' 
 #' @export
diff --git a/R/summary.survDataVarExp.R b/R/summary.survDataVarExp.R
index 997b3a4cce5e44fb088e30449577755170a521d1..90409bedd0e310d044d99d6801115c8ca3f78551 100644
--- a/R/summary.survDataVarExp.R
+++ b/R/summary.survDataVarExp.R
@@ -12,15 +12,6 @@
 #' \item{NbsurvTimeRep}{nb of survivors. for all replicates and time points}
 #' \item{ConcTimeRep}{Concentration for all replicates and time points}
 #' 
-#' @examples
-#' # (1) Load the data
-#' data(propiconazole_pulse_exposure)
-#' 
-#' # (2) Create a survDataVarExp object
-#' out <- survData(propiconazole_pulse_exposure)
-#' 
-#' # (3) Summarize the data set
-#' summary(out)
 #' 
 #' @keywords summary
 #' 
diff --git a/R/summary.survFit.R b/R/summary.survFit.R
index 51e8bd8967a2e4c0c9e2fa5a5b5f0d93443c509a..0fdc45725c3da079a6eeafa6ffd75f7f5ec1bdca 100644
--- a/R/summary.survFit.R
+++ b/R/summary.survFit.R
@@ -1,168 +1,152 @@
-#' Summary of \code{survFit} object
-#'
-#' This is the generic \code{summary} S3 method for the \code{survFit} class.
-#' It shows the quantiles of priors and posteriors on parameters.
-#'
-#' @param object An object of class \code{survFit}.
-#' @param quiet When \code{TRUE}, does not print.
-#' @param EFSA_name If \code{TRUE}, the current terminology by
-#'  the one used in the recent EFSA PPR Scientific Opinion (2018).
-#' @param \dots Further arguments to be passed to generic methods.
-#'
-#' @return The function returns a list with the following information:
-#' \item{Qpriors}{quantiles of the model priors}
-#' \item{Qposteriors}{quantiles of the model posteriors}
-#' 
-#' @references 
-#' EFSA PPR Scientific Opinion (2018)
-#' \emph{Scientific Opinion on the state of the art of Toxicokinetic/Toxicodynamic (TKTD) effect models for regulatory risk assessment of pesticides for aquatic organisms}
-#' \url{https://www.efsa.europa.eu/en/efsajournal/pub/5377}.
-#'
-#' @examples
-#' # (1) Load the data
-#' data(propiconazole)
-#'
-#' # (2) Create a survData object
-#' dat <- survData(propiconazole)
-#'
-#' \donttest{
-#' # (3) Run the survFit function
-#' out <- survFit(dat, model_type = "SD")
-#'
-#' # (4) summarize the survFit object
-#' summary(out)
-#' }
-#'
-#' @keywords summary
-#'
-#' @importFrom stats qnorm qunif
-#' 
-#' @export
-#' 
-
-summary.survFit <- function(object,
-                            quiet = FALSE,
-                            EFSA_name = FALSE,
-                            ...) {
-  
-  estim_parameters <- object$estim.par
-  
-  if(EFSA_name == TRUE){
-    parameters_SD_HBon = c("kD", "hb", "zw", "bw")
-    parameters_SD_HBoff = c("kD", "zw", "bw")
-    parameters_IT_HBon = c("kD", "hb", "mw", "beta")
-    parameters_IT_HBoff = c("kD", "mw", "beta")
-    
-    estim_parameters$parameters <- gsub("kd","kD", estim_parameters$parameters)
-    estim_parameters$parameters <- gsub("kk","bw", estim_parameters$parameters)
-    estim_parameters$parameters <- gsub("z","zw", estim_parameters$parameters)
-    estim_parameters$parameters <- gsub("alpha","mw", estim_parameters$parameters)
-    
-  } else{
-    parameters_SD_HBon = c("kd", "hb", "z", "kk")
-    parameters_SD_HBoff = c("kd", "z", "kk")
-    parameters_IT_HBon = c("kd", "hb", "alpha", "beta")
-    parameters_IT_HBoff = c("kd", "alpha", "beta")
-  }
-  
-  param <- object$jags.data
-  if("hb" %in% estim_parameters[, "parameters"]){
-    hb_value = TRUE
-  } else{
-    hb_value = FALSE
-  }
-  
-  # kd
-  kd_log10 <- qnorm(p = c(0.5, 0.025, 0.975),
-                   mean = param$kd_meanlog10,
-                   sd = param$kd_sdlog10)
-  
-  kd <- 10^kd_log10
-  
-  
-  # hb
-  if(hb_value == TRUE){
-    hb_log10 <- qnorm(p = c(0.5, 0.025, 0.975),
-                      mean = param$hb_meanlog10,
-                      sd = param$hb_sdlog10)
-    
-    hb <- 10^hb_log10
-  } 
-  
-  if(object$model_type == "SD"){
-    
-    # kk
-    kk_log10 <- qnorm(p = c(0.5, 0.025, 0.975),
-                      mean = param$kk_meanlog10,
-                      sd = param$kk_sdlog10)
-    
-    kk <- 10^kk_log10
-    
-    ## z
-    z_log10 <- qnorm(p = c(0.5, 0.025, 0.975),
-                     mean = param$z_meanlog10,
-                     sd = param$z_sdlog10)
-    
-    z <- 10^z_log10
-    
-    if(hb_value == TRUE){
-      res <- data.frame(parameters = parameters_SD_HBon,
-                        median = c(kd[1], hb[1], z[1], kk[1]),
-                        Q2.5 = c(kd[2], hb[2], z[2], kk[2]),
-                        Q97.5 = c(kd[3], hb[3], z[3], kk[3]))
-    } else{
-      res <- data.frame(parameters = parameters_SD_HBoff,
-                        median = c(kd[1], z[1], kk[1]),
-                        Q2.5 = c(kd[2], z[2], kk[2]),
-                        Q97.5 = c(kd[3], z[3], kk[3]))
-    }
-    
-  }
-  if(object$model_type == "IT"){
-    
-    # alpha
-    alpha_log10 <- qnorm(p = c(0.5, 0.025, 0.975),
-                     mean = param$alpha_meanlog10,
-                     sd = param$alpha_sdlog10)
-    
-    alpha <- 10^alpha_log10
-    
-    # beta
-    beta_log10 <- qunif(p = c(0.5, 0.025, 0.975),
-                      min = param$beta_minlog10,
-                      max = param$beta_maxlog10)
-    
-    beta <- 10^beta_log10
-    
-    if(hb_value == TRUE){
-      res <- data.frame(parameters = parameters_IT_HBon,
-                        median = c(kd[1], hb[1], alpha[1], beta[1]),
-                        Q2.5 = c(kd[2], hb[2], alpha[2], beta[2]),
-                        Q97.5 = c(kd[3], hb[3], alpha[3], beta[3]))
-    } else{
-      res <- data.frame(parameters = parameters_IT_HBoff,
-                        median = c(kd[1], alpha[1], beta[1]),
-                        Q2.5 = c(kd[2], alpha[2], beta[2]),
-                        Q97.5 = c(kd[3], alpha[3], beta[3]))
-    }
-    
-  }
-  
-  
-  ans1 <- format(data.frame(res), scientific = TRUE, digits = 4)
-  
-  # quantiles of estimated model parameters
-  ans2 <- format(estim_parameters, scientific = TRUE, digits = 4)
-  
-  # print
-  if (! quiet) {
-    cat("Summary: \n\n")
-    cat("Priors of the parameters (quantiles) (select with '$Qpriors'):\n\n")
-    print(ans1, row.names = FALSE)
-    cat("\nPosteriors of the parameters (quantiles) (select with '$Qposteriors'):\n\n")
-    print(ans2, row.names = FALSE)
-  }
-  
-  invisible(list(Qpriors = ans1,
-                 Qposteriors = ans2))
-}
+#' Summary of \code{survFit} object
+#'
+#' This is the generic \code{summary} S3 method for the \code{survFit} class.
+#' It shows the quantiles of priors and posteriors on parameters.
+#'
+#' @param object An object of class \code{survFit}.
+#' @param quiet When \code{TRUE}, does not print.
+#' @param EFSA_name If \code{TRUE}, replaces the current terminology with
+#'  the one used in the recent EFSA PPR Scientific Opinion (2018).
+#' @param \dots Further arguments to be passed to generic methods.
+#'
+#' @return The function returns a list with the following information:
+#' \item{Qpriors}{quantiles of the model priors}
+#' \item{Qposteriors}{quantiles of the model posteriors}
+#' 
+#' @references 
+#' EFSA PPR Scientific Opinion (2018)
+#' \emph{Scientific Opinion on the state of the art of Toxicokinetic/Toxicodynamic (TKTD) effect models for regulatory risk assessment of pesticides for aquatic organisms}
+#' \url{https://www.efsa.europa.eu/en/efsajournal/pub/5377}.
+#'
+#' @keywords summary
+#'
+#' @importFrom stats qnorm qunif
+#' 
+#' @export
+#' 
+summary.survFit <- function(object,
+                            quiet = FALSE,
+                            EFSA_name = FALSE,
+                            ...) {
+  
+  estim_parameters <- object$estim.par
+  
+  if (EFSA_name == TRUE){
+    parameters_SD_HBon = c("kD", "hb", "zw", "bw")
+    parameters_SD_HBoff = c("kD", "zw", "bw")
+    parameters_IT_HBon = c("kD", "hb", "mw", "beta")
+    parameters_IT_HBoff = c("kD", "mw", "beta")
+    
+    estim_parameters$parameters <- gsub("kd","kD", estim_parameters$parameters)
+    estim_parameters$parameters <- gsub("kk","bw", estim_parameters$parameters)
+    estim_parameters$parameters <- gsub("z","zw", estim_parameters$parameters)
+    estim_parameters$parameters <- gsub("alpha","mw", estim_parameters$parameters)
+    
+  } else{
+    parameters_SD_HBon = c("kd", "hb", "z", "kk")
+    parameters_SD_HBoff = c("kd", "z", "kk")
+    parameters_IT_HBon = c("kd", "hb", "alpha", "beta")
+    parameters_IT_HBoff = c("kd", "alpha", "beta")
+  }
+  
+  param <- object$jags.data
+  if("hb" %in% estim_parameters[, "parameters"]){
+    hb_value = TRUE
+  } else{
+    hb_value = FALSE
+  }
+  
+  # kd
+  kd_log10 <- qnorm(p = c(0.5, 0.025, 0.975),
+                   mean = param$kd_meanlog10,
+                   sd = param$kd_sdlog10)
+  
+  kd <- 10^kd_log10
+  
+  
+  # hb
+  if(hb_value == TRUE){
+    hb_log10 <- qnorm(p = c(0.5, 0.025, 0.975),
+                      mean = param$hb_meanlog10,
+                      sd = param$hb_sdlog10)
+    
+    hb <- 10^hb_log10
+  } 
+  
+  if(object$model_type == "SD"){
+    
+    # kk
+    kk_log10 <- qnorm(p = c(0.5, 0.025, 0.975),
+                      mean = param$kk_meanlog10,
+                      sd = param$kk_sdlog10)
+    
+    kk <- 10^kk_log10
+    
+    ## z
+    z_log10 <- qnorm(p = c(0.5, 0.025, 0.975),
+                     mean = param$z_meanlog10,
+                     sd = param$z_sdlog10)
+    
+    z <- 10^z_log10
+    
+    if(hb_value == TRUE){
+      res <- data.frame(parameters = parameters_SD_HBon,
+                        median = c(kd[1], hb[1], z[1], kk[1]),
+                        Q2.5 = c(kd[2], hb[2], z[2], kk[2]),
+                        Q97.5 = c(kd[3], hb[3], z[3], kk[3]))
+    } else{
+      res <- data.frame(parameters = parameters_SD_HBoff,
+                        median = c(kd[1], z[1], kk[1]),
+                        Q2.5 = c(kd[2], z[2], kk[2]),
+                        Q97.5 = c(kd[3], z[3], kk[3]))
+    }
+    
+  }
+  if(object$model_type == "IT"){
+    
+    # alpha
+    alpha_log10 <- qnorm(p = c(0.5, 0.025, 0.975),
+                     mean = param$alpha_meanlog10,
+                     sd = param$alpha_sdlog10)
+    
+    alpha <- 10^alpha_log10
+    
+    # beta
+    beta_log10 <- qunif(p = c(0.5, 0.025, 0.975),
+                      min = param$beta_minlog10,
+                      max = param$beta_maxlog10)
+    
+    beta <- 10^beta_log10
+    
+    if(hb_value == TRUE){
+      res <- data.frame(parameters = parameters_IT_HBon,
+                        median = c(kd[1], hb[1], alpha[1], beta[1]),
+                        Q2.5 = c(kd[2], hb[2], alpha[2], beta[2]),
+                        Q97.5 = c(kd[3], hb[3], alpha[3], beta[3]))
+    } else{
+      res <- data.frame(parameters = parameters_IT_HBoff,
+                        median = c(kd[1], alpha[1], beta[1]),
+                        Q2.5 = c(kd[2], alpha[2], beta[2]),
+                        Q97.5 = c(kd[3], alpha[3], beta[3]))
+    }
+    
+  }
+  
+  
+  ans1 <- format(data.frame(res), scientific = TRUE, digits = 4)
+  
+  # quantiles of estimated model parameters
+  ans2 <- format(estim_parameters, scientific = TRUE, digits = 4)
+  
+  # print
+  if (! quiet) {
+    cat("Summary: \n\n")
+    cat("Priors of the parameters (quantiles) (select with '$Qpriors'):\n\n")
+    print(ans1, row.names = FALSE)
+    cat("\nPosteriors of the parameters (quantiles) (select with '$Qposteriors'):\n\n")
+    print(ans2, row.names = FALSE)
+  }
+  
+  invisible(list(Qpriors = ans1,
+                 Qposteriors = ans2))
+}
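+
+# Usage sketch (not run), assuming `fit` is an object returned by survFit():
+#   s <- summary(fit, quiet = TRUE, EFSA_name = TRUE)
+#   s$Qpriors      # prior quantiles, with EFSA (2018) parameter names
+#   s$Qposteriors  # posterior quantiles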
diff --git a/R/summary.survFitTKTD.R b/R/summary.survFitTKTD.R
index c68ac7979373b7c9e4280ddfde38b8dfe0a368f1..dd004fcf3cb49a2660cf6d6bac0c9c41e72808a5 100644
--- a/R/summary.survFitTKTD.R
+++ b/R/summary.survFitTKTD.R
@@ -1,87 +1,72 @@
-#' Summary of \code{survFitTKTD} object
-#'
-#' This is the generic \code{summary} S3 methode for the \code{survFitTKTD} class.
-#' It shows the quantiles of priors and posteriors on parameters.
-#'
-#' @param object an object of class \code{survFitTKTD}
-#' @param quiet when \code{TRUE}, does not print
-#' @param \dots Further arguments to be passed to generic methods.
-#'
-#' @return The function returns a list with the following information:
-#' \item{Qpriors}{quantiles of the model priors}
-#' \item{Qposteriors}{quantiles of the model posteriors}
-#'
-#' @examples
-#' # (1) Load the data
-#' data(propiconazole)
-#'
-#' # (2) Create a survData object
-#' dat <- survData(propiconazole)
-#'
-#' \donttest{
-#' # (3) Run the survFitTKTD function
-#' out <- survFitTKTD(dat)
-#'
-#' # (4) summarize the survFitTKTD object
-#' summary(out)
-#' }
-#'
-#' @keywords summary
-#'
-#' @importFrom stats qnorm qunif
-#' 
-#' @export
-summary.survFitTKTD <- function(object, quiet = FALSE, ...) {
-  
-  # quantiles of priors parameters
-  n.iter <- object$n.iter$end - object$n.iter$start
-  
-  # kd
-  log10kd <- qnorm(p = c(0.5, 0.025, 0.975),
-                   mean = object$jags.data$meanlog10kd,
-                   sd = 1 / sqrt(object$jags.data$taulog10kd))
-  
-  kd <- 10^log10kd
-  
-  # ks
-  log10ks <- qnorm(p = c(0.5, 0.025, 0.975),
-                   mean = object$jags.data$meanlog10ks,
-                   sd = 1 / sqrt(object$jags.data$taulog10ks))
-  
-  ks <- 10^log10ks
-  
-  # nec
-  log10nec <- qnorm(p = c(0.5, 0.025, 0.975),
-                    mean = object$jags.data$meanlog10nec,
-                    sd = 1 / sqrt(object$jags.data$taulog10nec))
-  
-  nec <- 10^log10nec
-  
-  # m0
-  log10m0 <- qnorm(p = c(0.5, 0.025, 0.975),
-                   mean = object$jags.data$meanlog10m0,
-                   sd = 1 / sqrt(object$jags.data$taulog10m0))
-  
-  m0 <- 10^log10m0
-  
-  res <- rbind(kd, ks, nec, m0)
-  
-  ans1 <- format(data.frame(res), scientific = TRUE, digits = 4)
-  colnames(ans1) <- c("50%", "2.5%", "97.5%")
-  
-  # quantiles of estimated model parameters
-  ans2 <- format(object$estim.par, scientific = TRUE, digits = 4)
-  colnames(ans2) <- c("50%", "2.5%", "97.5%")
-  
-  # print
-  if (! quiet) {
-    cat("Summary: \n\n")
-    cat("Priors on parameters (quantiles):\n\n")
-    print(ans1, row.names = FALSE)
-    cat("\nPosteriors of the parameters (quantiles):\n\n")
-    print(ans2, row.names = FALSE)
-  }
-  
-  invisible(list(Qpriors = ans1,
-                 Qpost = ans2))
-}
+#' Summary of \code{survFitTKTD} object
+#'
+#' This is the generic \code{summary} S3 method for the \code{survFitTKTD} class.
+#' It shows the quantiles of priors and posteriors on parameters.
+#'
+#' @param object an object of class \code{survFitTKTD}
+#' @param quiet when \code{TRUE}, does not print
+#' @param \dots Further arguments to be passed to generic methods.
+#'
+#' @return The function returns a list with the following information:
+#' \item{Qpriors}{quantiles of the model priors}
+#' \item{Qpost}{quantiles of the model posteriors}
+#'
+#' @keywords summary
+#'
+#' @importFrom stats qnorm qunif
+#' 
+#' @export
+summary.survFitTKTD <- function(object, quiet = FALSE, ...) {
+  
+  # quantiles of the prior distributions
+  n.iter <- object$n.iter$end - object$n.iter$start
+  
+  # kd
+  log10kd <- qnorm(p = c(0.5, 0.025, 0.975),
+                   mean = object$jags.data$meanlog10kd,
+                   sd = 1 / sqrt(object$jags.data$taulog10kd))
+  
+  kd <- 10^log10kd
+  
+  # ks
+  log10ks <- qnorm(p = c(0.5, 0.025, 0.975),
+                   mean = object$jags.data$meanlog10ks,
+                   sd = 1 / sqrt(object$jags.data$taulog10ks))
+  
+  ks <- 10^log10ks
+  
+  # nec
+  log10nec <- qnorm(p = c(0.5, 0.025, 0.975),
+                    mean = object$jags.data$meanlog10nec,
+                    sd = 1 / sqrt(object$jags.data$taulog10nec))
+  
+  nec <- 10^log10nec
+  
+  # m0
+  log10m0 <- qnorm(p = c(0.5, 0.025, 0.975),
+                   mean = object$jags.data$meanlog10m0,
+                   sd = 1 / sqrt(object$jags.data$taulog10m0))
+  
+  m0 <- 10^log10m0
+  
+  res <- rbind(kd, ks, nec, m0)
+  
+  ans1 <- format(data.frame(res), scientific = TRUE, digits = 4)
+  colnames(ans1) <- c("50%", "2.5%", "97.5%")
+  
+  # quantiles of estimated model parameters
+  ans2 <- format(object$estim.par, scientific = TRUE, digits = 4)
+  colnames(ans2) <- c("50%", "2.5%", "97.5%")
+  
+  # print
+  if (! quiet) {
+    cat("Summary: \n\n")
+    cat("Priors on parameters (quantiles):\n\n")
+    print(ans1, row.names = FALSE)
+    cat("\nPosteriors of the parameters (quantiles):\n\n")
+    print(ans2, row.names = FALSE)
+  }
+  
+  invisible(list(Qpriors = ans1,
+                 Qpost = ans2))
+}
diff --git a/R/summary.survFitTT.R b/R/summary.survFitTT.R
index 538d9c68fde4c3385e63eefef43090f7459e0953..e94e2b2d7ad75cd49da3017de8790bf47d03bd75 100644
--- a/R/summary.survFitTT.R
+++ b/R/summary.survFitTT.R
@@ -1,96 +1,80 @@
-#' Summary of \code{survFitTT} object
-#'
-#' This is the generic \code{summary} S3 method for the \code{survFitTT} class.
-#' It shows the quantiles of priors and posteriors on parameters and the quantiles
-#' of the posteriors on the LCx estimates.
-#'
-#' @param object an object of class \code{survFitTT}
-#' @param quiet when \code{TRUE}, does not print
-#' @param \dots Further arguments to be passed to generic methods
-#'
-#' @return The function returns a list with the following information:
-#' \item{Qpriors}{quantiles of the model priors}
-#' \item{Qposteriors}{quantiles of the model posteriors}
-#' \item{QLCx}{quantiles of LCx estimates}
-#'
-#' @examples
-#' # (1) Load the data
-#' data(cadmium1)
-#'
-#' # (2) Create a survData object
-#' cadmium1 <- survData(cadmium1)
-#'
-#' \donttest{
-#' # (3) Run the survFitTT function with the log-logistic
-#' # binomial model
-#' out <- survFitTT(cadmium1, lcx = c(5, 10, 15, 20, 30, 50, 80),
-#'                  quiet = TRUE)
-#'
-#' # (4) summarize the survFitTT object
-#' summary(out)
-#' }
-#'
-#' @keywords summary
-#'
-#' @importFrom stats qnorm qunif
-#' 
-#' @export
-summary.survFitTT <- function(object, quiet = FALSE, ...) {
-
-  # quantiles of priors parameters
-  n.iter <- object$n.iter$end - object$n.iter$start
-
-  # b
-  log10b <- qunif(p = c(0.5, 0.025, 0.975),
-                  min = object$jags.data$log10bmin,
-                  max = object$jags.data$log10bmax)
-
-  b <- 10^log10b
-
-  # e
-  log10e <- qnorm(p = c(0.5, 0.025, 0.975),
-                  mean = object$jags.data$meanlog10e,
-                  sd = 1 / sqrt(object$jags.data$taulog10e))
-
-  e <- 10^log10e
-
-  # d
-  if (object$det.part == "loglogisticbinom_3") {
-
-    d <- qunif(p = c(0.5, 0.025, 0.975),
-               min = object$jags.data$dmin,
-               max = object$jags.data$dmax)
-
-    res <- rbind(b, d, e)
-  } else {
-    res <- rbind(b, e)
-  }
-
-  ans1 <- format(data.frame(res), scientific = TRUE, digits = 4)
-  colnames(ans1) <- c("50%", "2.5%", "97.5%")
-
-  # quantiles of estimated model parameters
-  ans2 <- format(object$estim.par, scientific = TRUE, digits = 4)
-  colnames(ans2) <- c("50%", "2.5%", "97.5%")
-
-  # estimated ECx and their CIs 95%
-  ans3 <- format(object$estim.LCx, scientific = TRUE, digits = 4)
-  colnames(ans3) <- c("50%", "2.5%", "97.5%")
-
-  # print
-  if (! quiet) {
-    cat("Summary: \n\n")
-    cat("The ", object$det.part, " model with a binomial stochastic part was used !\n\n")
-    cat("Priors on parameters (quantiles):\n\n")
-    print(ans1)
-    cat("\nPosteriors of the parameters (quantiles):\n\n")
-    print(ans2)
-    cat("\nPosteriors of the LCx (quantiles):\n\n")
-    print(ans3)
-  }
-
-  invisible(list(Qpriors = ans1,
-                 Qpost = ans2,
-                 QLCx = ans3))
-}
-
+#' Summary of \code{survFitTT} object
+#'
+#' This is the generic \code{summary} S3 method for the \code{survFitTT} class.
+#' It shows the quantiles of priors and posteriors on parameters and the quantiles
+#' of the posteriors on the LCx estimates.
+#'
+#' @param object an object of class \code{survFitTT}
+#' @param quiet when \code{TRUE}, does not print
+#' @param \dots Further arguments to be passed to generic methods
+#'
+#' @return The function returns a list with the following information:
+#' \item{Qpriors}{quantiles of the model priors}
+#' \item{Qpost}{quantiles of the model posteriors}
+#' \item{QLCx}{quantiles of LCx estimates}
+#'
+#' @keywords summary
+#'
+#' @importFrom stats qnorm qunif
+#' 
+#' @export
+summary.survFitTT <- function(object, quiet = FALSE, ...) {
+
+  # quantiles of the prior distributions
+  n.iter <- object$n.iter$end - object$n.iter$start
+
+  # b
+  log10b <- qunif(p = c(0.5, 0.025, 0.975),
+                  min = object$jags.data$log10bmin,
+                  max = object$jags.data$log10bmax)
+
+  b <- 10^log10b
+
+  # e
+  log10e <- qnorm(p = c(0.5, 0.025, 0.975),
+                  mean = object$jags.data$meanlog10e,
+                  sd = 1 / sqrt(object$jags.data$taulog10e))
+
+  e <- 10^log10e
+
+  # d
+  if (object$det.part == "loglogisticbinom_3") {
+
+    d <- qunif(p = c(0.5, 0.025, 0.975),
+               min = object$jags.data$dmin,
+               max = object$jags.data$dmax)
+
+    res <- rbind(b, d, e)
+  } else {
+    res <- rbind(b, e)
+  }
+
+  ans1 <- format(data.frame(res), scientific = TRUE, digits = 4)
+  colnames(ans1) <- c("50%", "2.5%", "97.5%")
+
+  # quantiles of estimated model parameters
+  ans2 <- format(object$estim.par, scientific = TRUE, digits = 4)
+  colnames(ans2) <- c("50%", "2.5%", "97.5%")
+
+  # estimated LCx and their 95% CIs
+  ans3 <- format(object$estim.LCx, scientific = TRUE, digits = 4)
+  colnames(ans3) <- c("50%", "2.5%", "97.5%")
+
+  # print
+  if (! quiet) {
+    cat("Summary: \n\n")
+    cat("The ", object$det.part, " model with a binomial stochastic part was used !\n\n")
+    cat("Priors on parameters (quantiles):\n\n")
+    print(ans1)
+    cat("\nPosteriors of the parameters (quantiles):\n\n")
+    print(ans2)
+    cat("\nPosteriors of the LCx (quantiles):\n\n")
+    print(ans3)
+  }
+
+  invisible(list(Qpriors = ans1,
+                 Qpost = ans2,
+                 QLCx = ans3))
+}
+
diff --git a/R/survData.R b/R/survData.R
index 6e4ee3eca8fedc68e2d9f5f390175bd5a7618f84..2e63cb848a7ca48a87ef9ffdd139f749262a6e63 100644
--- a/R/survData.R
+++ b/R/survData.R
@@ -43,15 +43,6 @@
 #' @keywords transformation
 #'
 #' @importFrom tibble as_tibble
-#' 
-#' @examples
-#'
-#' # (1) Load the survival data set
-#' data(zinc)
-#'
-#' # (2) Create an objet of class 'survData'
-#' dat <- survData(zinc)
-#' class(dat)
 #'
 #' @export
 #' 
@@ -84,17 +75,6 @@ survData <- function(x) {
 #' @return a boolean \code{TRUE} if concentration in \code{replicate} is constant,
 #'  or \code{FALSE} if the concentration in at least one of the replicates is time-variable,
 #'  and/or if \code{NA} occures. 
-#'
-#' @examples
-#'
-#' # (1) Load the survival data set and test if concentration in replicates is constant
-#' data("propiconazole")
-#' is_exposure_constant(propiconazole)
-#' is_exposure_constant(survData(propiconazole))
-#'
-#'  # (1) Load the survival data set and test if concentration in replicates is constant
-#' data("propiconazole_pulse_exposure") 
-#' is_exposure_constant(propiconazole_pulse_exposure)
 #' 
 #' @export
 #' 
@@ -174,17 +154,6 @@ Ninit <- function(x) {
 #' 
 #' @return a dataframe suitable for `survData`
 #'
-#' @examples
-#'
-#' # (1) Load the two survival data sets
-#' data(propiconazole_pulse_exposure)
-#' exposure <- propiconazole_pulse_exposure[,c("replicate", "time", "conc")]
-#' survival <- propiconazole_pulse_exposure[,c("replicate", "time", "Nsurv")]
-#'
-#' # (2) Create an objet of class 'survData'
-#' dat_join <- survData(survData_join(exposure, survival))
-#' class(dat_join)
-#'
 #' @export
 #'
 survData_join <- function(x, y) {
diff --git a/R/survDataCheck.R b/R/survDataCheck.R
index 39011f280f6d446670e6a1b854c768c770003418..abc93758648d268b2c511032b15b612307727879 100644
--- a/R/survDataCheck.R
+++ b/R/survDataCheck.R
@@ -34,18 +34,7 @@
 #' other errors.
 #'
 #' @seealso \code{\link{survData}}
-#'
-#' @examples
-#' # Run the check data function
-#' data(zinc)
-#' survDataCheck(zinc)
-#'
-#' # Now we insert an error in the dataset, by artificially increasing the
-#' # number of survivors at a given time point, in such a way that the number
-#' # of indivuals increases in the corresponding replicate
-#' zinc[25, "Nsurv"] <- as.integer(20)
-#' survDataCheck(zinc, diagnosis.plot = TRUE)
-#'
+#' 
 #' @importFrom magrittr '%>%'
 #' @importFrom dplyr arrange
 #' @importFrom dplyr mutate
diff --git a/R/survFit.R b/R/survFit.R
index 0d9add6f06dc7677338b6fea41c1b3f6b5487709..a6dd5cc15f9e49c56110dea6ef5ebd63c799af60 100644
--- a/R/survFit.R
+++ b/R/survFit.R
@@ -1,299 +1,279 @@
-#' Fits a TKTD model for survival analysis using Bayesian inference
-#'
-#' This function estimates the parameters of a TKTD model ('SD' or 'IT')
-#' for survival analysis using Bayesian inference. In this model,
-#' the survival rate of individuals is modeled as a function of the chemical compound
-#' concentration with a mechanistic description of the effects on survival over
-#' time.
-#' 
-#' The function \code{survFit} returns the parameter estimates of Toxicokinetic-toxicodynamic (TKTD) models
-#' \code{SD} for 'Stochastic Death' or \code{IT} fo 'Individual Tolerance'.
-#' TKTD models, and particularly the General Unified Threshold model of
-#' Survival (GUTS), provide a consistent process-based
-#' framework to analyse both time and concentration dependent datasets.
-#' In GUTS-SD, all organisms are assumed to have the same internal concentration 
-#' threshold (denoted \eqn{z}), and, once exceeded, the instantaneous probability
-#' to die increases linearly with the internal concentration.
-#' In GUTS-IT, the threshold concentration is distributed among all the organisms, and once 
-#' exceeded in one individual, this individual dies immediately.
-#' 
-#' 
-#' When class of \code{object} is \code{survDataCstExp}, see \link[=survFit.survDataCstExp]{survFit.survDataCstExp} ;
-#' and for a \code{survDataVarExp}, see \link[=survFit.survDataVarExp]{survFit.survDataVarExp}.
-#'
-#' @rdname survFit
-#'
-#' @param data An object of class \code{survDataCstExp} or \code{survDataVarExp}.
-#' @param model_type Can be \code{"SD"} or \code{"IT"} to choose
-#'   between "Stochastic Death" or "Individual Tolerance" models
-#'   (resp.). See the modeling vignette for details.
-#' @param quiet If \code{FALSE}, prints logs and progress bar from
-#'   JAGS.
-#' @param n.chains A positive integer specifying the number of MCMC chains. The minimum required number 
-#' of chains is 2.
-#' @param n.adapt A positive integer specifying the number of iterations for adaptation. If \code{n.adapt} = 0
-#'  then no adaptation takes place.
-#' @param n.iter A positive integer specifying the number of iterations to monitor for each chain.
-#' @param n.warmup A positive integer specifying the number of warmup (aka burnin) iterations per chain. 
-#' @param thin.interval A positive integer specifying the period to monitor.
-#' @param limit.sampling if \code{FALSE} (default is \code{TRUE}), there is no limit to the number of iterations
-#' in MCMC imposed by the \code{raftery.diag} test.
-#' @param dic.compute if \code{TRUE} (default is \code{FALSE}), it generates penalized deviance samples to compute
-#' the Deviance Information Criterion (DIC) with the \code{rjags} package
-#' @param dic.type type of penalty to use. A string identifying the type of penalty: \code{pD} or \code{popt}
-#'  (see function \code{\link[rjags]{dic.samples}})
-#' @param hb_value If \code{TRUE}, the background mortality \code{hb} is taken into account.
-#' If \code{FALSE}, parameter \code{hb} is set to 0. The default is \code{TRUE}.
-#' @param  hb_valueFIXED If \code{hb_value} is \code{FALSE}, then \code{hb_valueFiXED} is the value to fix \code{hb}.
-#'   If \code{hb_value} is \code{FALSE} and  \code{hb_valueFiXED} is \code{NA}, then \code{hb} is fixed to \code{0}.
-#' @param extend_time Number of for each replicate used for linear 
-#' interpolation (comprise between time to compute and fitting accuracy)
-#' @param \dots Further arguments to be passed to generic methods
-#' 
-#' @references Jager, T., Albert, C., Preuss, T. G. and Ashauer, R. (2011) 
-#' General unified threshold model of survival-a toxicokinetic-toxicodynamic
-#'  framework for ecotoxicology, \emph{Environmental Science and Technology}, 45, 2529-2540.
-#' 303-314.
-#' 
-#' @keywords estimation 
-#' 
-#' @return an object of class \code{survFit}
-#' 
-#' @export
-#' 
-#' 
-survFit <- function(data,
-                    model_type,
-                    quiet,
-                    n.chains,
-                    n.adapt,
-                    n.iter,
-                    n.warmup,
-                    thin.interval,
-                    limit.sampling,
-                    dic.compute,
-                    dic.type,
-                    hb_value,
-                    hb_valueFIXED,
-                    ...){
-  UseMethod("survFit")
-}
-
-
-################################################################################
-#
-#  PRIORS
-#
-################################################################################
-
-#' Create a list of scalars giving priors to use in Bayesian inference.
-#'
-#' @param x An object of class \code{survData}
-#' @param model_type TKTD model type ('SD' or 'IT')
-#' 
-#' @return A list for parameterization of priors for Bayesian inference with JAGS.
-#'
-#' @examples 
-#' 
-#' # (1) Load the data
-#' data(cadmium1)
-#' 
-#' # (2) Create a survData object
-#' dat <- survData(cadmium1)
-#' 
-#' # (3) Create priors for SD model_type
-#' priors_survData(dat, model_type = "SD")
-#' 
-#' # (4) Create priors for IT model_type
-#' priors_survData(dat, model_type = "IT")
-#' 
-#' @export
-
-
-# priors <- function(x, ...){
-#   UseMethod("priors")
-# }
-
-
-priors_survData <- function(x, model_type = NULL){
-  
-  data <- filter(x, time != 0)
-  
-  # Parameter calculation of concentration min and max
-  conc_min <- min(data$conc[data$conc != 0], na.rm = TRUE) # to remove 0 and NA
-  conc_max <- max(data$conc, na.rm = TRUE)
-  
-  time_min <- min(data$time)
-  time_max <- max(data$time)
-  
-  conc_unic <- sort(unique(data$conc))
-  conc_unicPrec <- dplyr::lag(conc_unic)
-  conc_minDelta <- min(conc_unic - conc_unicPrec, na.rm = TRUE)
-  
-  ##
-  ## dominant rate constant: kd
-  ##
-  
-  kd_max <- -log(0.001) / time_min
-  kd_min <- -log(0.999) / time_max
-  
-  ##
-  ## background hazard rate
-  ##
-  
-  hb_max <- -log(0.5) / time_min
-  hb_min <- -log(0.999) / time_max
-  
-  ##
-  ## killing rate parameter: kk
-  ##
-  
-  kk_max <- -log(0.001) / (time_min * conc_minDelta)
-  kk_min <- -log(0.999) / (time_max * (conc_max - conc_min))
-  
-  ##
-  ## beta
-  ##
-  
-  beta_minlog10 <- -2
-  beta_maxlog10 <- 2
-  
-  priorsMinMax <- list(
-    conc_min = conc_min,
-    conc_max = conc_max,
-    
-    kd_min = kd_min,
-    kd_max = kd_max,
-    
-    hb_min = hb_min,
-    hb_max = hb_max )
-  
-  ##
-  ## Construction of the list of priors
-  ##
-  
-  priorsList <-  list(
-    ##
-    ## dominant rate constant: kd
-    ##
-    kd_meanlog10 = (log10(kd_max) + log10(kd_min)) / 2 ,
-    kd_sdlog10 = (log10(kd_max) - log10(kd_min)) / 4 ,
-    ##
-    ## background hazard rate
-    ##
-    hb_meanlog10 = (log10(hb_max) + log10(hb_min)) / 2 ,
-    hb_sdlog10 = (log10(hb_max) - log10(hb_min)) / 4
-  )
-  
-  if(model_type == "IT"){
-    
-    ## priorsMinMax
-    priorsMinMax$beta_min <- beta_minlog10
-    priorsMinMax$beta_max <- beta_maxlog10
-    
-    ## priorsList
-    ### non effect threshold: scale parameter & median of a log-logistic distribution
-    priorsList$alpha_meanlog10 <- (log10(conc_max) + log10(conc_min)) / 2
-    priorsList$alpha_sdlog10 <- (log10(conc_max) - log10(conc_min)) / 4
-    
-    ### shape parameter of a log-logistic distribution
-    priorsList$beta_minlog10 <- beta_minlog10
-    priorsList$beta_maxlog10 <- beta_maxlog10
-    
-  } else if (model_type == "SD"){
-    
-    ## priorsMinMax
-    priorsMinMax$kk_min <- kk_min
-    priorsMinMax$kk_max <- kk_max
-    
-    ## priorsList
-    ### killing rate parameter: kk
-    priorsList$kk_meanlog10 <- (log10(kk_max) + log10(kk_min)) / 2
-    priorsList$kk_sdlog10 <- (log10(kk_max) - log10(kk_min)) / 4
-    ### non effect threshold: z
-    priorsList$z_meanlog10 <- (log10(conc_max) + log10(conc_min)) / 2
-    priorsList$z_sdlog10 <- (log10(conc_max) - log10(conc_min)) / 4
-  } else stop("please, provide the 'model_type': 'IT' or 'SD'")
-  
-  
-  return(list(priorsList = priorsList,
-              priorsMinMax = priorsMinMax))
-}
-
-
-#############################################################################
-#
-#    survFit_TKTD_params
-#
-#############################################################################
-  
-survFit_TKTD_params <- function(mcmc, model_type, hb_value = TRUE) {
-    # create the table of posterior estimated parameters
-    # for the survival analyses
-    # INPUT:
-    # - mcmc:  list of estimated parameters for the model with each item representing
-    # a chain
-    # OUTPUT:
-    # - data frame with 3 columns (values, CIinf, CIsup) and 3-4rows (the estimated
-    # parameters)
-    
-    # Retrieving parameters of the model
-    res.M <- summary(mcmc)
-    
-    kd <- 10^res.M$quantiles["kd_log10", "50%"]
-    kd_inf95 <- 10^res.M$quantiles["kd_log10", "2.5%"]
-    kd_sup95 <- 10^res.M$quantiles["kd_log10", "97.5%"]
-    
-    if(hb_value == TRUE){
-      hb <- 10^res.M$quantiles["hb_log10", "50%"]
-      hb_inf95 <- 10^res.M$quantiles["hb_log10", "2.5%"]
-      hb_sup95 <- 10^res.M$quantiles["hb_log10", "97.5%"]
-    }
-    
-    if(model_type == "SD"){
-      kk <- 10^res.M$quantiles["kk_log10", "50%"]
-      kk_inf95 <- 10^res.M$quantiles["kk_log10", "2.5%"]
-      kk_sup95 <- 10^res.M$quantiles["kk_log10", "97.5%"]
-      
-      z <- 10^res.M$quantiles["z_log10", "50%"]
-      z_inf95 <- 10^res.M$quantiles["z_log10", "2.5%"]
-      z_sup95 <- 10^res.M$quantiles["z_log10", "97.5%"]
-      
-      if(hb_value == TRUE){
-        res <- data.frame(parameters = c("kd", "hb", "z", "kk"),
-                          median = c(kd, hb, z, kk),
-                          Q2.5 = c(kd_inf95, hb_inf95, z_inf95, kk_inf95),
-                          Q97.5 = c(kd_sup95, hb_sup95, z_sup95, kk_sup95))
-      } else{
-        res <- data.frame(parameters = c("kd", "z", "kk"),
-                          median = c(kd, z, kk),
-                          Q2.5 = c(kd_inf95, z_inf95, kk_inf95),
-                          Q97.5 = c(kd_sup95, z_sup95, kk_sup95))
-      }
-      
-    } else if (model_type == "IT"){
-      alpha <- 10^res.M$quantiles["alpha_log10", "50%"]
-      alpha_inf95 <- 10^res.M$quantiles["alpha_log10", "2.5%"]
-      alpha_sup95 <- 10^res.M$quantiles["alpha_log10", "97.5%"]
-      
-      beta <- 10^res.M$quantiles["beta_log10", "50%"]
-      beta_inf95 <- 10^res.M$quantiles["beta_log10", "2.5%"]
-      beta_sup95 <- 10^res.M$quantiles["beta_log10", "97.5%"]
-      
-      if(hb_value == TRUE){
-        res <- data.frame(parameters = c("kd", "hb", "alpha", "beta"),
-                          median = c(kd, hb, alpha, beta),
-                          Q2.5 = c(kd_inf95, hb_inf95, alpha_inf95, beta_inf95),
-                          Q97.5 = c(kd_sup95, hb_sup95, alpha_sup95, beta_sup95))
-      } else{
-        res <- data.frame(parameters = c("kd", "alpha", "beta"),
-                          median = c(kd, alpha, beta),
-                          Q2.5 = c(kd_inf95, alpha_inf95, beta_inf95),
-                          Q97.5 = c(kd_sup95, alpha_sup95, beta_sup95))
-      }
-    } else {
-      stop("please, provide the 'model_type': 'IT' or 'SD'")
-    }
-    
-    return(res)
-}
+#' Fits a TKTD model for survival analysis using Bayesian inference
+#'
+#' This function estimates the parameters of a TKTD model ('SD' or 'IT')
+#' for survival analysis using Bayesian inference. In this model,
+#' the survival rate of individuals is modeled as a function of the chemical compound
+#' concentration with a mechanistic description of the effects on survival over
+#' time.
+#' 
+#' The function \code{survFit} returns the parameter estimates of Toxicokinetic-toxicodynamic (TKTD) models
+#' \code{SD} for 'Stochastic Death' or \code{IT} for 'Individual Tolerance'.
+#' TKTD models, and particularly the General Unified Threshold model of
+#' Survival (GUTS), provide a consistent process-based
+#' framework to analyse both time and concentration dependent datasets.
+#' In GUTS-SD, all organisms are assumed to have the same internal concentration 
+#' threshold (denoted \eqn{z}), and, once exceeded, the instantaneous probability
+#' to die increases linearly with the internal concentration.
+#' In GUTS-IT, the threshold concentration is distributed among all the organisms, and once 
+#' exceeded in one individual, this individual dies immediately.
+#' 
+#' 
+#' When \code{data} is of class \code{survDataCstExp}, see \link[=survFit.survDataCstExp]{survFit.survDataCstExp};
+#' when it is of class \code{survDataVarExp}, see \link[=survFit.survDataVarExp]{survFit.survDataVarExp}.
+#'
+#' @rdname survFit
+#'
+#' @param data An object of class \code{survDataCstExp} or \code{survDataVarExp}.
+#' @param model_type Can be \code{"SD"} or \code{"IT"} to choose
+#'   between "Stochastic Death" or "Individual Tolerance" models
+#'   (resp.). See the modeling vignette for details.
+#' @param quiet If \code{FALSE}, prints logs and progress bar from
+#'   JAGS.
+#' @param n.chains A positive integer specifying the number of MCMC chains. The minimum required number 
+#' of chains is 2.
+#' @param n.adapt A positive integer specifying the number of iterations for adaptation. If \code{n.adapt} = 0
+#'  then no adaptation takes place.
+#' @param n.iter A positive integer specifying the number of iterations to monitor for each chain.
+#' @param n.warmup A positive integer specifying the number of warmup (aka burnin) iterations per chain. 
+#' @param thin.interval A positive integer specifying the thinning interval: one iteration in every \code{thin.interval} is kept.
+#' @param limit.sampling If \code{FALSE} (default is \code{TRUE}), the number of MCMC iterations
+#' is not limited by the \code{raftery.diag} diagnostic test.
+#' @param dic.compute If \code{TRUE} (default is \code{FALSE}), generates penalized deviance samples to compute
+#' the Deviance Information Criterion (DIC) with the \code{rjags} package.
+#' @param dic.type A string identifying the type of penalty to use: \code{pD} or \code{popt}
+#'  (see function \code{\link[rjags]{dic.samples}}).
+#' @param hb_value If \code{TRUE}, the background mortality \code{hb} is taken into account.
+#' If \code{FALSE}, parameter \code{hb} is set to 0. The default is \code{TRUE}.
+#' @param hb_valueFIXED If \code{hb_value} is \code{FALSE}, then \code{hb_valueFIXED} is the value at which \code{hb} is fixed.
+#'   If \code{hb_value} is \code{FALSE} and \code{hb_valueFIXED} is \code{NA}, then \code{hb} is fixed to \code{0}.
+#' @param extend_time Number of time points, for each replicate, used for linear
+#' interpolation of the exposure profile (a compromise between computation time and fitting accuracy).
+#' @param \dots Further arguments to be passed to generic methods
+#' 
+#' @references Jager, T., Albert, C., Preuss, T. G. and Ashauer, R. (2011) 
+#' General unified threshold model of survival-a toxicokinetic-toxicodynamic
+#'  framework for ecotoxicology, \emph{Environmental Science and Technology}, 45, 2529-2540.
+#' 
+#' @keywords estimation 
+#' 
+#' @return an object of class \code{survFit}
+#' 
+#' @export
+#' 
+#' 
+survFit <- function(data,
+                    model_type,
+                    quiet,
+                    n.chains,
+                    n.adapt,
+                    n.iter,
+                    n.warmup,
+                    thin.interval,
+                    limit.sampling,
+                    dic.compute,
+                    dic.type,
+                    hb_value,
+                    hb_valueFIXED,
+                    ...){
+  UseMethod("survFit")
+}
+
+
+################################################################################
+#
+#  PRIORS
+#
+################################################################################
+
+#' Create a list of scalars giving priors to use in Bayesian inference.
+#'
+#' @param x An object of class \code{survData}
+#' @param model_type TKTD model type ('SD' or 'IT')
+#' 
+#' @return A list for parameterization of priors for Bayesian inference with JAGS.
+#' 
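+#' @examples
+#' # A minimal sketch, assuming the 'propiconazole' example dataset shipped
+#' # with the package:
+#' data("propiconazole")
+#' priors_survData(survData(propiconazole), model_type = "SD")
+#'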
+#' @importFrom dplyr filter
+#'
+#' @export
+#' 
+priors_survData <- function(x, model_type = NULL){
+  
+  data <- filter(x, time != 0)
+  
+  # Compute the minimum and maximum tested concentrations
+  conc_min <- min(data$conc[data$conc != 0], na.rm = TRUE) # to remove 0 and NA
+  conc_max <- max(data$conc, na.rm = TRUE)
+  
+  time_min <- min(data$time)
+  time_max <- max(data$time)
+  
+  conc_unic <- sort(unique(data$conc))
+  conc_unicPrec <- dplyr::lag(conc_unic)
+  conc_minDelta <- min(conc_unic - conc_unicPrec, na.rm = TRUE)
+  
+  ##
+  ## dominant rate constant: kd
+  ##
+  
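+  # A design-based heuristic (as in the GUTS literature): kd_max lets the
+  # scaled damage reach 99.9% of its asymptote by the first observation time,
+  # kd_min only 0.1% by the last one, since 1 - exp(-kd * t) = p gives
+  # kd = -log(1 - p) / t.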
+  kd_max <- -log(0.001) / time_min
+  kd_min <- -log(0.999) / time_max
+  
+  ##
+  ## background hazard rate
+  ##
+  
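+  # hb_max corresponds to 50% background mortality by the first observation
+  # time (exp(-hb * t) = 0.5); hb_min to 0.1% by the last one.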
+  hb_max <- -log(0.5) / time_min
+  hb_min <- -log(0.999) / time_max
+  
+  ##
+  ## killing rate parameter: kk
+  ##
+  
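+  # Same survival-based logic, scaled by concentration: the bounds use the
+  # smallest step between tested concentrations (kk_max) and the full tested
+  # range (kk_min).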
+  kk_max <- -log(0.001) / (time_min * conc_minDelta)
+  kk_min <- -log(0.999) / (time_max * (conc_max - conc_min))
+  
+  ##
+  ## beta
+  ##
+  
+  beta_minlog10 <- -2
+  beta_maxlog10 <- 2
+  
+  priorsMinMax <- list(
+    conc_min = conc_min,
+    conc_max = conc_max,
+    
+    kd_min = kd_min,
+    kd_max = kd_max,
+    
+    hb_min = hb_min,
+    hb_max = hb_max )
+  
+  ##
+  ## Construction of the list of priors
+  ##
+  
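+  # Normal priors on the log10 scale: the mean is the midpoint of [min, max]
+  # and the SD a quarter of the range, so the interval spans about +/- 2 SD
+  # (roughly 95% of the prior mass).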
+  priorsList <-  list(
+    ##
+    ## dominant rate constant: kd
+    ##
+    kd_meanlog10 = (log10(kd_max) + log10(kd_min)) / 2 ,
+    kd_sdlog10 = (log10(kd_max) - log10(kd_min)) / 4 ,
+    ##
+    ## background hazard rate
+    ##
+    hb_meanlog10 = (log10(hb_max) + log10(hb_min)) / 2 ,
+    hb_sdlog10 = (log10(hb_max) - log10(hb_min)) / 4
+  )
+  
+  if(model_type == "IT"){
+    
+    ## priorsMinMax
+    priorsMinMax$beta_min <- beta_minlog10
+    priorsMinMax$beta_max <- beta_maxlog10
+    
+    ## priorsList
+    ### non effect threshold: scale parameter & median of a log-logistic distribution
+    priorsList$alpha_meanlog10 <- (log10(conc_max) + log10(conc_min)) / 2
+    priorsList$alpha_sdlog10 <- (log10(conc_max) - log10(conc_min)) / 4
+    
+    ### shape parameter of a log-logistic distribution
+    priorsList$beta_minlog10 <- beta_minlog10
+    priorsList$beta_maxlog10 <- beta_maxlog10
+    
+  } else if (model_type == "SD"){
+    
+    ## priorsMinMax
+    priorsMinMax$kk_min <- kk_min
+    priorsMinMax$kk_max <- kk_max
+    
+    ## priorsList
+    ### killing rate parameter: kk
+    priorsList$kk_meanlog10 <- (log10(kk_max) + log10(kk_min)) / 2
+    priorsList$kk_sdlog10 <- (log10(kk_max) - log10(kk_min)) / 4
+    ### non effect threshold: z
+    priorsList$z_meanlog10 <- (log10(conc_max) + log10(conc_min)) / 2
+    priorsList$z_sdlog10 <- (log10(conc_max) - log10(conc_min)) / 4
+  } else stop("Please provide a 'model_type': either 'SD' or 'IT'")
+  
+  
+  return(list(priorsList = priorsList,
+              priorsMinMax = priorsMinMax))
+}
+
+
+#############################################################################
+#
+#    survFit_TKTD_params
+#
+#############################################################################
+  
+survFit_TKTD_params <- function(mcmc, model_type, hb_value = TRUE) {
+    # Create the table of posterior parameter estimates for the survival analyses.
+    # INPUT:
+    # - mcmc: list of estimated parameters for the model, with each item
+    #   representing a chain
+    # OUTPUT:
+    # - data.frame with 4 columns (parameters, median, Q2.5, Q97.5) and
+    #   3 to 4 rows (the estimated parameters)
+    
+    # Retrieving parameters of the model
+    res.M <- summary(mcmc)
+    
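+    # Parameters are sampled on the log10 scale, so posterior quantiles are
+    # back-transformed to the natural scale with 10^.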
+    kd <- 10^res.M$quantiles["kd_log10", "50%"]
+    kd_inf95 <- 10^res.M$quantiles["kd_log10", "2.5%"]
+    kd_sup95 <- 10^res.M$quantiles["kd_log10", "97.5%"]
+    
+    if(hb_value == TRUE){
+      hb <- 10^res.M$quantiles["hb_log10", "50%"]
+      hb_inf95 <- 10^res.M$quantiles["hb_log10", "2.5%"]
+      hb_sup95 <- 10^res.M$quantiles["hb_log10", "97.5%"]
+    }
+    
+    if(model_type == "SD"){
+      kk <- 10^res.M$quantiles["kk_log10", "50%"]
+      kk_inf95 <- 10^res.M$quantiles["kk_log10", "2.5%"]
+      kk_sup95 <- 10^res.M$quantiles["kk_log10", "97.5%"]
+      
+      z <- 10^res.M$quantiles["z_log10", "50%"]
+      z_inf95 <- 10^res.M$quantiles["z_log10", "2.5%"]
+      z_sup95 <- 10^res.M$quantiles["z_log10", "97.5%"]
+      
+      if(hb_value == TRUE){
+        res <- data.frame(parameters = c("kd", "hb", "z", "kk"),
+                          median = c(kd, hb, z, kk),
+                          Q2.5 = c(kd_inf95, hb_inf95, z_inf95, kk_inf95),
+                          Q97.5 = c(kd_sup95, hb_sup95, z_sup95, kk_sup95))
+      } else{
+        res <- data.frame(parameters = c("kd", "z", "kk"),
+                          median = c(kd, z, kk),
+                          Q2.5 = c(kd_inf95, z_inf95, kk_inf95),
+                          Q97.5 = c(kd_sup95, z_sup95, kk_sup95))
+      }
+      
+    } else if (model_type == "IT"){
+      alpha <- 10^res.M$quantiles["alpha_log10", "50%"]
+      alpha_inf95 <- 10^res.M$quantiles["alpha_log10", "2.5%"]
+      alpha_sup95 <- 10^res.M$quantiles["alpha_log10", "97.5%"]
+      
+      beta <- 10^res.M$quantiles["beta_log10", "50%"]
+      beta_inf95 <- 10^res.M$quantiles["beta_log10", "2.5%"]
+      beta_sup95 <- 10^res.M$quantiles["beta_log10", "97.5%"]
+      
+      if(hb_value == TRUE){
+        res <- data.frame(parameters = c("kd", "hb", "alpha", "beta"),
+                          median = c(kd, hb, alpha, beta),
+                          Q2.5 = c(kd_inf95, hb_inf95, alpha_inf95, beta_inf95),
+                          Q97.5 = c(kd_sup95, hb_sup95, alpha_sup95, beta_sup95))
+      } else{
+        res <- data.frame(parameters = c("kd", "alpha", "beta"),
+                          median = c(kd, alpha, beta),
+                          Q2.5 = c(kd_inf95, alpha_inf95, beta_inf95),
+                          Q97.5 = c(kd_sup95, alpha_sup95, beta_sup95))
+      }
+    } else {
+      stop("Please provide a 'model_type': either 'SD' or 'IT'")
+    }
+    
+    return(res)
+}
diff --git a/R/survFit.survDataCstExp.R b/R/survFit.survDataCstExp.R
index 65c6e51ce14576a8a992a3ff14b0a74e069a7a9c..099b7a8ae403b7b26e778eb23197a791a7f85195 100644
--- a/R/survFit.survDataCstExp.R
+++ b/R/survFit.survDataCstExp.R
@@ -1,283 +1,265 @@
-#' @rdname survFit
-#'
-#' @return The function returns an object of class \code{survFitCstExp}, which is
-#' a list with the following information:
-#' \item{estim.par}{a table of the estimated parameters as medians and 95\%
-#' credible intervals}
-#' \item{mcmc}{an object of class \code{mcmc.list} with the posterior
-#' distribution}
-#' \item{model}{a JAGS model object}
-#' \item{dic}{return the Deviance Information Criterion (DIC) if \code{dic.compute} is \code{TRUE}}
-#' \item{warnings}{a table with warning messages}
-#' \item{parameters}{a list of parameter names used in the model}
-#' \item{n.chains}{an integer value corresponding to the number of chains used
-#' for the MCMC computation}
-#' \item{mcmcInfo}{a table with the number of iterations, chains, adaptation, warmup and the thinning interval.} 
-#' \item{jags.data}{a list of the data passed to the JAGS model}
-#' \item{model_type}{the type of TKTD model used: \code{SD} or \code{IT}}
-#'
-#' @examples
-#' 
-#' # Example with time-variable exposure profile#'
-#' # (1) Load the survival data
-#' data(propiconazole)
-#' # (2) Create an object of class "survData"
-#' dataset  <- survData(propiconazole)
-#' \donttest{
-#' # (3) Run the survFit function with TKTD model 'SD' or 'IT' 
-#' out <- survFit(dataset , model_type = "SD")
-#' # (4) Summarize look the estimated parameters
-#' summary(out)
-#' # (5) Plot the fitted curve
-#' plot(out, adddata = TRUE)
-#' # (6) Plot the fitted curve with ggplot style and CI as spaghetti
-#' plot(out, spaghetti = TRUE , adddata = TRUE)
-#' }
-#'
-#' @import rjags
-#' @importFrom stats update
-#' @importFrom dplyr group_by summarise filter
-#'
-#' @export
-#'
-survFit.survDataCstExp <- function(data,
-                                   model_type = NULL,
-                                   quiet = FALSE,
-                                   n.chains = 3,
-                                   n.adapt = 3000,
-                                   n.iter = NULL,
-                                   n.warmup = NULL,
-                                   thin.interval = NULL,
-                                   limit.sampling = TRUE,
-                                   dic.compute = FALSE,
-                                   dic.type = "pD",
-                                   hb_value = TRUE,
-                                   hb_valueFIXED = NA,
-                                   ...){
-  
-  ##
-  ## Pre modelling measure and tests
-  ##
-
-  ### ensures model_type is one of "SD" and "IT"
-  if(is.null(model_type) || ! (model_type %in% c("SD","IT"))) {
-    stop("You need to specify a 'model_type' among 'SD' or 'IT'")
-  }
-  ### check number of sample for the diagnostic procedure
-  if (n.chains < 2) {
-    stop('2 or more parallel chains required')
-  }
-  ### warning message when hb_value = NULL
-  if(hb_value==FALSE){
-    warning("This is not an error message: the parameter 'hb' is fixed. This means that the correlation between
-            'hb' and other parameters is ignored.")
-    ## set default hb_valueFIXED
-    if(is.na(hb_valueFIXED)){
-      hb_valueFIXED = 0
-    }
-  }
-  ##
-  ## Data and Priors for model
-  ##
-  
-  globalData <- modelData(data,  model_type = model_type)
-  
-  jags.data <- unlist(list(globalData$dataList, globalData$priorsList), recursive = FALSE)
-  
-  jags.data_fit <- jags.data ; jags.data_fit$replicate = NULL
-  
-  priorsMinMax <- globalData$priorsMinMax
-
-  ##
-  ## Define model
-  ##
-
-
-  if(model_type == "SD"){
-    if(hb_value == TRUE){
-      jags.data_fit$hb_value = 1
-      jags.data_fit$hb_valueFIXED = -1 # just to have it in JAGS
-      parameters_sampling <- c("kd_log10", "hb_log10", "kk_log10", "z_log10")
-      parameters <- c("kd_log10", "hb_log10", "kk_log10", "hb", "z_log10", "psurv", "Nsurv_ppc", "Nsurv_sim")
-    } else{
-      jags.data_fit$hb_value = 0
-      jags.data_fit$hb_valueFIXED = hb_valueFIXED
-      parameters_sampling <- c("kd_log10", "kk_log10", "z_log10")
-      parameters <- c("kd_log10", "kk_log10", "z_log10", "hb", "psurv", "Nsurv_ppc", "Nsurv_sim")
-    }
-    file_to_use <- jags_TKTD_cstSD
-    
-
-  } else if(model_type == "IT"){
-    ### Determine sampling parameters
-    if(hb_value == TRUE){
-      jags.data_fit$hb_value = 1
-      jags.data_fit$hb_valueFIXED = -1 # just to have it in JAGS
-      parameters_sampling <- c("kd_log10", "hb_log10", "alpha_log10", "beta_log10")
-      parameters <- c("kd_log10", "hb_log10","alpha_log10", "beta_log10", "hb", "psurv", "Nsurv_ppc", "Nsurv_sim")
-    } else{
-      jags.data_fit$hb_value = 0
-      jags.data_fit$hb_valueFIXED = hb_valueFIXED
-      parameters_sampling <- c("kd_log10", "alpha_log10", "beta_log10")
-      parameters <- c("kd_log10","alpha_log10", "beta_log10", "hb", "psurv", "Nsurv_ppc", "Nsurv_sim")
-    }
-    file_to_use <- jags_TKTD_cstIT
-  }
-
-
-  model <- survLoadModel(model.program = file_to_use,
-                         data = jags.data_fit,
-                         n.chains = n.chains,
-                         Nadapt = n.adapt,
-                         quiet = quiet)
-
-  ##
-  ## estimate the number of iteration required for convergency of chains
-  ## by using the raftery.diag
-  ##
-
-  if(is.null(n.warmup) | is.null(thin.interval) | is.null(n.iter)){
-
-
-    sampling.parameters <- modelSamplingParameters(model,
-                                                   parameters_sampling,
-                                                   n.chains = n.chains, quiet = quiet)
-    
-    if (sampling.parameters$niter > 2e5 & limit.sampling == TRUE){
-      stop("The model needs too many iterations to provide reliable parameter estimates !")
-    }
-      
-    n.warmup = sampling.parameters$burnin
-    thin.interval = sampling.parameters$thin
-    n.iter = sampling.parameters$niter
-
-  }
-
-  ### model to check priors with the model
-  update(model, n.warmup)
-  
-  if(dic.compute == TRUE){ # Deviance Information Criterion
-    dic <- dic.samples(model,
-                       n.iter = n.iter,
-                       thin = thin.interval,
-                       type = dic.type) 
-  } else dic = NULL
- 
-  
-  mcmc =  coda.samples(model,
-                       variable.names = parameters,
-                       n.iter = n.iter,
-                       thin = thin.interval,
-                       progress.bar = ifelse(quiet, "none", "text"))
-
-  ##
-  ## Cheking posterior range with data from experimental design:
-  ##
-
-  warnings <- msgTableCreate()
-
-  estim.par <- survFit_TKTD_params(mcmc, model_type = model_type, hb_value = hb_value)
-
-  if (filter(estim.par, parameters == "kd")$Q97.5 > priorsMinMax$kd_max){
-    ##store warning in warnings table
-    msg <- "The estimation of the dominant rate constant (model parameter kd)
-    lies outside the range used to define its prior distribution which indicates
-    that this rate is very high and difficult to estimate from this experiment !"
-    warnings <- msgTableAdd(warnings, "kd_outRange", msg)
-    ## print the message
-    warning(msg, call. = FALSE)
-  }
-  if(hb_value == TRUE){
-    if (filter(estim.par, parameters == "hb")$Q2.5 < priorsMinMax$hb_min){
-      ##store warning in warnings table
-      msg <- "The estimation of the natural instantaneous mortality rate
-    (model parameter hb) lies outside the range used to define its prior
-    distribution which indicates that this rate is very low and so difficult
-    to estimate from this experiment !"
-      warnings <- msgTableAdd(warnings, "hb_outRange", msg)
-      ## print the message
-      warning(msg, call. = FALSE)
-    }
-  }
-
-  ### for SD model
-  if(model_type == "SD"){
-    if (filter(estim.par, parameters == "kk")$Q97.5 > priorsMinMax$kk_max){
-      ##store warning in warnings table
-      msg <- "The estimation of the killing rate (model parameter kk) lies
-      outside the range used to define its prior distribution which indicates
-      that this rate is very high and difficult to estimate from this experiment !"
-      warnings <- msgTableAdd(warnings, "kk_outRange", msg)
-      ## print the message
-      warning(msg, call. = FALSE)
-    }
-
-    if (filter(estim.par, parameters == "z")$Q2.5 < priorsMinMax$conc_min ||
-        filter(estim.par, parameters == "z")$Q97.5 > priorsMinMax$conc_max){
-      ##store warning in warnings table
-      msg <- "The estimation of Non Effect Concentration threshold (NEC)
-      (model parameter z) lies outside the range of tested concentration and
-      may be unreliable as the prior distribution on this parameter
-      is defined from this range !"
-      warnings <- msgTableAdd(warnings, "z_outRange", msg)
-      ## print the message
-      warning(msg, call. = FALSE)
-    }
-
-  }
-
-  ### for IT model
-  if(model_type == "IT"){
-
-    if (filter(estim.par, parameters == "alpha")$Q2.5 < priorsMinMax$conc_min ||
-        filter(estim.par, parameters == "alpha")$Q97.5 > priorsMinMax$conc_max){
-      ##store warning in warnings table
-      msg <- "The estimation of log-logistic median (model parameter alpha) lies
-      outside the range of tested concentration and may be unreliable as the prior
-      distribution on this parameter is defined from this range !"
-      warnings <- msgTableAdd(warnings, "alpha_outRange", msg)
-      ## print the message
-      warning(msg, call. = FALSE)
-    }
-  }
-
-  ##
-  ## MCMC information
-  ## 
-  mcmcInfo = data.frame(n.iter = n.iter,
-                        n.chains = n.chains,
-                        n.adapt = n.adapt,
-                        thin.interval = thin.interval,
-                        n.warmup = n.warmup)
-  ##
-  ##
-  ##
-  transformed.data <- data.frame(
-    replicate = jags.data$replicate,
-    time = jags.data$time,
-    conc = jags.data$conc,
-    Nsurv = jags.data$Nsurv
-  ) %>%
-    group_by(replicate) %>%
-    mutate(Ninit = max(Nsurv, na.rm = TRUE))
-  
-  ##
-  ## OUTPUT
-  ##
-  
-  OUT <- list(estim.par = estim.par,
-              mcmc = mcmc,
-              model = model,
-              dic = dic,
-              parameters = parameters,
-              mcmcInfo = mcmcInfo,
-              jags.data = jags.data,
-              warnings = warnings,
-              model_type = model_type,
-              transformed.data = transformed.data,
-              original.data = data,
-              hb_valueFIXED = hb_valueFIXED)
-
-  class(OUT) <- c("survFitCstExp", "survFit")
-  return(OUT)
-}
+#' @rdname survFit
+#'
+#' @return The function returns an object of class \code{survFitCstExp}, which is
+#' a list with the following information:
+#' \item{estim.par}{a table of the estimated parameters as medians and 95\%
+#' credible intervals}
+#' \item{mcmc}{an object of class \code{mcmc.list} with the posterior
+#' distribution}
+#' \item{model}{a JAGS model object}
+#' \item{dic}{return the Deviance Information Criterion (DIC) if \code{dic.compute} is \code{TRUE}}
+#' \item{warnings}{a table with warning messages}
+#' \item{parameters}{a list of parameter names used in the model}
+#' \item{n.chains}{an integer value corresponding to the number of chains used
+#' for the MCMC computation}
+#' \item{mcmcInfo}{a table with the number of iterations, chains, adaptation, warmup and the thinning interval.} 
+#' \item{jags.data}{a list of the data passed to the JAGS model}
+#' \item{model_type}{the type of TKTD model used: \code{SD} or \code{IT}}
+#'
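+#' @examples
+#'
+#' # A minimal sketch with the constant-exposure dataset shipped with the
+#' # package:
+#' data("propiconazole")
+#' dataset <- survData(propiconazole)
+#' \donttest{
+#' # Run the survFit function with TKTD model 'SD' or 'IT'
+#' out <- survFit(dataset, model_type = "SD")
+#' # Summarize the estimated parameters and plot the fit
+#' summary(out)
+#' plot(out, adddata = TRUE)
+#' }
+#'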
+#' @import rjags
+#' @importFrom stats update
+#' @importFrom dplyr group_by summarise filter mutate
+#'
+#' @export
+#'
+survFit.survDataCstExp <- function(data,
+                                   model_type = NULL,
+                                   quiet = FALSE,
+                                   n.chains = 3,
+                                   n.adapt = 3000,
+                                   n.iter = NULL,
+                                   n.warmup = NULL,
+                                   thin.interval = NULL,
+                                   limit.sampling = TRUE,
+                                   dic.compute = FALSE,
+                                   dic.type = "pD",
+                                   hb_value = TRUE,
+                                   hb_valueFIXED = NA,
+                                   ...){
+  
+  ##
+  ## Pre-modelling measures and tests
+  ##
+
+  ### ensure model_type is one of "SD" or "IT"
+  if(is.null(model_type) || ! (model_type %in% c("SD","IT"))) {
+    stop("You need to specify a 'model_type', either 'SD' or 'IT'")
+  }
+  ### check the number of chains required for the diagnostic procedure
+  if (n.chains < 2) {
+    stop('2 or more parallel chains required')
+  }
+  ### warning message when hb_value is FALSE
+  if(hb_value==FALSE){
+    warning("This is not an error message: the parameter 'hb' is fixed. This means that the correlation between
+            'hb' and other parameters is ignored.")
+    ## set default hb_valueFIXED
+    if(is.na(hb_valueFIXED)){
+      hb_valueFIXED = 0
+    }
+  }
+  ##
+  ## Data and Priors for model
+  ##
+  
+  globalData <- modelData(data,  model_type = model_type)
+  
+  jags.data <- unlist(list(globalData$dataList, globalData$priorsList), recursive = FALSE)
+  
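+  # 'replicate' is not used by JAGS, so drop it from the data passed to the
+  # sampler to avoid an "unused variable" warning.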
+  jags.data_fit <- jags.data ; jags.data_fit$replicate = NULL
+  
+  priorsMinMax <- globalData$priorsMinMax
+
+  ##
+  ## Define model
+  ##
+
+
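+  # 'parameters_sampling' feeds the Raftery & Lewis diagnostic below, while
+  # 'parameters' (adding 'hb', 'psurv' and the posterior-predictive counts)
+  # is monitored in the final coda.samples() run.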
+  if(model_type == "SD"){
+    if(hb_value == TRUE){
+      jags.data_fit$hb_value = 1
+      jags.data_fit$hb_valueFIXED = -1 # just to have it in JAGS
+      parameters_sampling <- c("kd_log10", "hb_log10", "kk_log10", "z_log10")
+      parameters <- c("kd_log10", "hb_log10", "kk_log10", "hb", "z_log10", "psurv", "Nsurv_ppc", "Nsurv_sim")
+    } else{
+      jags.data_fit$hb_value = 0
+      jags.data_fit$hb_valueFIXED = hb_valueFIXED
+      parameters_sampling <- c("kd_log10", "kk_log10", "z_log10")
+      parameters <- c("kd_log10", "kk_log10", "z_log10", "hb", "psurv", "Nsurv_ppc", "Nsurv_sim")
+    }
+    file_to_use <- jags_TKTD_cstSD
+    
+
+  } else if(model_type == "IT"){
+    ### Determine sampling parameters
+    if(hb_value == TRUE){
+      jags.data_fit$hb_value = 1
+      jags.data_fit$hb_valueFIXED = -1 # just to have it in JAGS
+      parameters_sampling <- c("kd_log10", "hb_log10", "alpha_log10", "beta_log10")
+      parameters <- c("kd_log10", "hb_log10","alpha_log10", "beta_log10", "hb", "psurv", "Nsurv_ppc", "Nsurv_sim")
+    } else{
+      jags.data_fit$hb_value = 0
+      jags.data_fit$hb_valueFIXED = hb_valueFIXED
+      parameters_sampling <- c("kd_log10", "alpha_log10", "beta_log10")
+      parameters <- c("kd_log10","alpha_log10", "beta_log10", "hb", "psurv", "Nsurv_ppc", "Nsurv_sim")
+    }
+    file_to_use <- jags_TKTD_cstIT
+  }
+
+
+  model <- survLoadModel(model.program = file_to_use,
+                         data = jags.data_fit,
+                         n.chains = n.chains,
+                         Nadapt = n.adapt,
+                         quiet = quiet)
+
+  ##
+  ## estimate the number of iterations required for convergence of the chains
+  ## by using the raftery.diag diagnostic
+  ##
+
+  if(is.null(n.warmup) | is.null(thin.interval) | is.null(n.iter)){
+
+
+    sampling.parameters <- modelSamplingParameters(model,
+                                                   parameters_sampling,
+                                                   n.chains = n.chains, quiet = quiet)
+    
+    if (sampling.parameters$niter > 2e5 & limit.sampling == TRUE){
+      stop("The model needs too many iterations to provide reliable parameter estimates !")
+    }
+      
+    n.warmup = sampling.parameters$burnin
+    thin.interval = sampling.parameters$thin
+    n.iter = sampling.parameters$niter
+
+  }
+
+  ### update the model: warmup (burn-in) phase
+  update(model, n.warmup)
+  
+  if(dic.compute == TRUE){ # Deviance Information Criterion
+    dic <- dic.samples(model,
+                       n.iter = n.iter,
+                       thin = thin.interval,
+                       type = dic.type) 
+  } else dic = NULL
+ 
+  
+  mcmc =  coda.samples(model,
+                       variable.names = parameters,
+                       n.iter = n.iter,
+                       thin = thin.interval,
+                       progress.bar = ifelse(quiet, "none", "text"))
+
+  ##
+  ## Checking posterior ranges against the experimental design:
+  ##
+
+  warnings <- msgTableCreate()
+
+  estim.par <- survFit_TKTD_params(mcmc, model_type = model_type, hb_value = hb_value)
+
+  if (filter(estim.par, parameters == "kd")$Q97.5 > priorsMinMax$kd_max){
+    ##store warning in warnings table
+    msg <- "The estimation of the dominant rate constant (model parameter kd)
+    lies outside the range used to define its prior distribution which indicates
+    that this rate is very high and difficult to estimate from this experiment !"
+    warnings <- msgTableAdd(warnings, "kd_outRange", msg)
+    ## print the message
+    warning(msg, call. = FALSE)
+  }
+  if(hb_value == TRUE){
+    if (filter(estim.par, parameters == "hb")$Q2.5 < priorsMinMax$hb_min){
+      ##store warning in warnings table
+      msg <- "The estimation of the natural instantaneous mortality rate
+    (model parameter hb) lies outside the range used to define its prior
+    distribution which indicates that this rate is very low and so difficult
+    to estimate from this experiment !"
+      warnings <- msgTableAdd(warnings, "hb_outRange", msg)
+      ## print the message
+      warning(msg, call. = FALSE)
+    }
+  }
+
+  ### for SD model
+  if(model_type == "SD"){
+    if (filter(estim.par, parameters == "kk")$Q97.5 > priorsMinMax$kk_max){
+      ##store warning in warnings table
+      msg <- "The estimation of the killing rate (model parameter kk) lies
+      outside the range used to define its prior distribution which indicates
+      that this rate is very high and difficult to estimate from this experiment !"
+      warnings <- msgTableAdd(warnings, "kk_outRange", msg)
+      ## print the message
+      warning(msg, call. = FALSE)
+    }
+
+    if (filter(estim.par, parameters == "z")$Q2.5 < priorsMinMax$conc_min ||
+        filter(estim.par, parameters == "z")$Q97.5 > priorsMinMax$conc_max){
+      ##store warning in warnings table
+      msg <- "The estimation of Non Effect Concentration threshold (NEC)
+      (model parameter z) lies outside the range of tested concentration and
+      may be unreliable as the prior distribution on this parameter
+      is defined from this range !"
+      warnings <- msgTableAdd(warnings, "z_outRange", msg)
+      ## print the message
+      warning(msg, call. = FALSE)
+    }
+
+  }
+
+  ### for IT model
+  if(model_type == "IT"){
+
+    if (filter(estim.par, parameters == "alpha")$Q2.5 < priorsMinMax$conc_min ||
+        filter(estim.par, parameters == "alpha")$Q97.5 > priorsMinMax$conc_max){
+      ##store warning in warnings table
+      msg <- "The estimation of log-logistic median (model parameter alpha) lies
+      outside the range of tested concentration and may be unreliable as the prior
+      distribution on this parameter is defined from this range !"
+      warnings <- msgTableAdd(warnings, "alpha_outRange", msg)
+      ## print the message
+      warning(msg, call. = FALSE)
+    }
+  }
+
+  ##
+  ## MCMC information
+  ## 
+  mcmcInfo = data.frame(n.iter = n.iter,
+                        n.chains = n.chains,
+                        n.adapt = n.adapt,
+                        thin.interval = thin.interval,
+                        n.warmup = n.warmup)
+  ##
+  ## Transformed data (initial number of individuals per replicate)
+  ##
+  transformed.data <- data.frame(
+    replicate = jags.data$replicate,
+    time = jags.data$time,
+    conc = jags.data$conc,
+    Nsurv = jags.data$Nsurv
+  ) %>%
+    group_by(replicate) %>%
+    mutate(Ninit = max(Nsurv, na.rm = TRUE))
+  
+  ##
+  ## OUTPUT
+  ##
+  
+  OUT <- list(estim.par = estim.par,
+              mcmc = mcmc,
+              model = model,
+              dic = dic,
+              parameters = parameters,
+              mcmcInfo = mcmcInfo,
+              jags.data = jags.data,
+              warnings = warnings,
+              model_type = model_type,
+              transformed.data = transformed.data,
+              original.data = data,
+              hb_valueFIXED = hb_valueFIXED)
+
+  class(OUT) <- c("survFitCstExp", "survFit")
+  return(OUT)
+}
diff --git a/R/survFit.survDataVarExp.R b/R/survFit.survDataVarExp.R
index 4b245e29fc8995737c8fc2c0b0c834c3e0370b71..cddb9d326d56f37f696a4d69d976ac7002fad2e5 100644
--- a/R/survFit.survDataVarExp.R
+++ b/R/survFit.survDataVarExp.R
@@ -1,292 +1,274 @@
-#' @rdname survFit
-#'
-#' @return The function returns an object of class \code{survFitVarExp}, which is
-#' a list with the following information:
-#' \item{estim.par}{a table of the estimated parameters as medians and 95\%
-#' credible intervals}
-#' \item{mcmc}{an object of class \code{mcmc.list} with the posterior
-#' distribution}
-#' \item{model}{a JAGS model object}
-#' \item{dic}{return the Deviance Information Criterion (DIC) if \code{dic.compute} is \code{TRUE}}
-#' \item{warnings}{a table with warning messages}
-#' \item{parameters}{a list of parameter names used in the model}
-#' \item{n.chains}{an integer value corresponding to the number of chains used
-#' for the MCMC computation}
-#' \item{mcmcInfo}{a table with the number of iterations, chains, adaptation, warmup and the thinning interval} 
-#' \item{jags.data}{a list of the data passed to the JAGS model}
-#' \item{model_type}{the type of TKTD model used: \code{SD} or \code{IT}}
-#' 
-#' 
-#' @examples
-#'
-#' # Example with time-variable exposure profile
-#' # (1) Load the survival data
-#' data("propiconazole_pulse_exposure")
-#' # (2) Create an object of class "survData"
-#' dataset <- survData(propiconazole_pulse_exposure)
-#' \donttest{
-#' # (3) Run the survFit function with TKTD model 'SD' or 'IT' 
-#' out <- survFit(dataset , model_type = "SD")
-#' # (4) Summarize look the estimated parameters
-#' summary(out)
-#' # (5) Plot the fitted curve
-#' plot(out, adddata = FALSE)
-#' # (6) Plot the fitted curve with ggplot style and CI as spaghetti
-#' plot(out, spaghetti = TRUE)
-#' }
-#' 
-#' @import rjags
-#' 
-#' @export
-survFit.survDataVarExp <- function(data,
-                                 model_type = NULL,
-                                 quiet = FALSE,
-                                 n.chains = 3,
-                                 n.adapt = 1000,
-                                 n.iter = NULL,
-                                 n.warmup = NULL,
-                                 thin.interval = NULL,
-                                 limit.sampling = TRUE,
-                                 dic.compute = FALSE,
-                                 dic.type = "pD",
-                                 hb_value = TRUE,
-                                 hb_valueFIXED = NA,
-                                 extend_time = 100,
-                                 ...){
-  
-  ##
-  ## Pre modelling measure and tests
-  ##
-
-  ### ensures model_type is one of "SD" and "IT"
-  if(is.null(model_type) || ! (model_type %in% c("SD","IT"))) {
-    stop("You need to specify a 'model_type' among 'SD' or 'IT'")
-  }
-  ### check number of sample for the diagnostic procedure
-  if (n.chains < 2) {
-    stop('2 or more parallel chains required')
-  }
-  ### warning message when hb_value = NULL
-  if(hb_value==FALSE){
-    warning("This is not an error message: the parameter 'hb' is fixed. This means that the correlation between
-            'hb' and other parameters is ignored.")
-    ### Set default hb_valueFIXED
-    if(is.na(hb_valueFIXED)){
-      hb_valueFIXED = 0
-    }
-  }
-
-  ##
-  ## Data and Priors for model
-  ##
-  
-  globalData <- modelData(x = data, model_type = model_type, extend_time = extend_time)
-  
-  ### Remove the information of replicate since this is not used in JAGS, and so a warning message would be show
-  
-  jags.data <- globalData$modelData
-  
-  jags.data_fit <- jags.data
-  
-  jags.data_fit$replicate <- NULL
-  jags.data_fit$conc <- NULL
-  jags.data_fit$replicate_long <- NULL
-  
-  priorsData = globalData$priorsMinMax
-  
-  ##
-  ## Define model
-  ##
-  
-  if(model_type == "SD"){
-    
-    jags.data_fit$time = NULL # remove jags.data_fit$time for varSD model
-    
-    if(hb_value == TRUE){
-      ### Determine sampling parameters
-      jags.data_fit$hb_value = 1
-      jags.data_fit$hb_valueFIXED = -1 # just to have it in JAGS
-      parameters_sampling <- c("kd_log10", "hb_log10", "z_log10", "kk_log10")
-      parameters <- c("kd_log10", "hb_log10", "z_log10", "kk_log10", "hb", "psurv", "Nsurv_ppc", "Nsurv_sim")
-    } else{
-      ### Determine sampling parameters
-      jags.data_fit$hb_value = 0
-      jags.data_fit$hb_valueFIXED = hb_valueFIXED
-      parameters_sampling <- c("kd_log10", "z_log10", "kk_log10")
-      parameters <- c("kd_log10", "z_log10", "kk_log10", "hb", "psurv", "Nsurv_ppc", "Nsurv_sim")
-    }
-    file_to_use <- jags_TKTD_varSD
-    
-      
-  } else if(model_type == "IT"){
-    ### Determine sampling parameters
-    
-    if(hb_value == TRUE){
-      jags.data_fit$hb_value = 1
-      jags.data_fit$hb_valueFIXED = -1 # just to have it in JAGS
-      parameters_sampling <- c("kd_log10", "hb_log10","alpha_log10", "beta_log10")
-      parameters <- c("kd_log10", "hb_log10","alpha_log10", "beta_log10", "hb", "psurv", "Nsurv_ppc", "Nsurv_sim")
-    } else{
-      jags.data_fit$hb_value = 0
-      jags.data_fit$hb_valueFIXED = hb_valueFIXED
-      parameters_sampling <- c("kd_log10","alpha_log10", "beta_log10")
-      parameters <- c("kd_log10","alpha_log10", "beta_log10", "psurv", "hb", "Nsurv_ppc", "Nsurv_sim")
-    }
-    file_to_use <- jags_TKTD_varIT
-
-  }
-
-  model <- survLoadModel(model.program = file_to_use,
-                         data = jags.data_fit,
-                         n.chains = n.chains,
-                         Nadapt = n.adapt,
-                         quiet = quiet)
-
-  
-  ##
-  ## estimate the number of iteration required for convergency of chains
-  ## by using the raftery.diag
-  ##
-  
-  if(is.null(n.warmup) | is.null(thin.interval) | is.null(n.iter)){
-    
-    sampling.parameters <- modelSamplingParameters(model,
-                                                   parameters_sampling,
-                                                   n.chains = n.chains, quiet = quiet)
-    if (sampling.parameters$niter > 5e5)
-      stop("The model needs too many iterations to provide reliable parameter estimates !")
-    
-    n.warmup = sampling.parameters$burnin
-    thin.interval = sampling.parameters$thin
-    n.iter = sampling.parameters$niter
-    
-  }
-
-  ### model to check priors with the model
-  update(model, n.warmup)
-  
-  if(dic.compute == TRUE){ # Deviance Information Criterion
-    dic <- dic.samples(model,
-                       n.iter = n.iter,
-                       thin = thin.interval,
-                       type = dic.type) 
-  } else dic = NULL
-  
-  mcmc =  coda.samples(model,
-                       variable.names = parameters,
-                       n.iter = n.iter,
-                       thin = thin.interval)
-  
-  ##
-  ## Cheking posterior range with data from experimental design:
-  ##
-  
-  estim.par <- survFit_TKTD_params(mcmc, model_type = model_type, hb_value = hb_value)
-  
-  warnings <- msgTableCreate()
-  
-  if (filter(estim.par, parameters == "kd")$Q97.5 > priorsData$kd_max){
-    ## store warning in warnings table
-    msg <- "The estimation of the dominant rate constant (model parameter kd) lies 
-    outside the range used to define its prior distribution which indicates that this
-    rate is very high and difficult to estimate from this experiment !"
-    warnings <- msgTableAdd(warnings, "kd_outRange", msg)
-    ## print the message
-    warning(msg, call. = FALSE)
-  }
-  
-  if(hb_value == TRUE){
-    if (filter(estim.par, parameters == "hb")$Q2.5 < priorsData$hb_min){
-      ## store warning in warnings table
-      msg <- "The estimation of the natural instantaneous mortality rate (model 
-    parameter hb) lies outside the range used to define its prior distribution 
-    which indicates that this rate is very low and so difficult to estimate 
-    from this experiment !"
-      warnings <- msgTableAdd(warnings, "hb_outRange", msg)
-      ## print the message
-      warning(msg, call. = FALSE)
-    }
-  }
-
-  ### for SD model
-  if(model_type == "SD"){
-    if (filter(estim.par, parameters == "kk")$Q97.5 > priorsData$kk_max){
-      ## store warning in warnings table
-      msg <- "The estimation of the killing rate (model parameter k) lies
-      outside the range used to define its prior distribution which indicates
-      that this rate is very high and difficult to estimate from this experiment !"
-      warnings <- msgTableAdd(warnings, "kk_outRange", msg)
-      ## print the message
-      warning(msg, call. = FALSE)
-    }
-    
-    if (filter(estim.par, parameters == "z")$Q2.5 < priorsData$conc_min ||
-        filter(estim.par, parameters == "z")$Q97.5 > priorsData$conc_max){
-      ## store warning in warnings table
-      msg <- "The estimation of Non Effect Concentration threshold (NEC) 
-      (model parameter z) lies outside the range of tested concentration 
-      and may be unreliable as the prior distribution on this parameter is
-      defined from this range !"
-      warnings <- msgTableAdd(warnings, "z_outRange", msg)
-      ## print the message
-      warning(msg, call. = FALSE)
-    }
-  }
-  
-  ### for IT model
-  if(model_type == "IT"){
-    
-    if (filter(estim.par, parameters == "alpha")$Q2.5 < priorsData$conc_min ||
-        filter(estim.par, parameters == "alpha")$Q97.5 > priorsData$conc_max){
-      ## store warning in warnings table
-      msg <- "The estimation of log-logistic median (model parameter alpha) 
-      lies outside the range of tested concentration and may be unreliable as 
-      the prior distribution on this parameter is defined from this range !"
-      warnings <- msgTableAdd(warnings, "alpha_outRange", msg)
-      ## print the message
-      warning(msg, call. = FALSE)
-    }
-  }
-  
-  ##
-  ## MCMC information
-  ## 
-  mcmcInfo = data.frame(n.iter = n.iter,
-                        n.chains = n.chains,
-                        n.adapt = n.adapt,
-                        thin.interval = thin.interval,
-                        n.warmup = n.warmup)
-  
-
-  ##
-  ##
-  ##
-  transformed.data <- data.frame(
-    replicate = jags.data$replicate,
-    time = jags.data$time,
-    conc = jags.data$conc,
-    Nsurv = jags.data$Nsurv
-  ) %>%
-    group_by(replicate) %>%
-    mutate(Ninit = max(Nsurv, na.rm = TRUE))
-  
-  ##
-  ## OUTPUT
-  ##
-  
-  OUT <- list(estim.par = estim.par,
-              mcmc = mcmc,
-              model = model,
-              dic = dic,
-              parameters = parameters,
-              mcmcInfo = mcmcInfo,
-              jags.data = jags.data,
-              warnings = warnings,
-              model_type = model_type,
-              transformed.data = transformed.data,
-              original.data = data,
-              hb_valueFIXED = hb_valueFIXED)
-  
-  class(OUT) <- c("survFitVarExp","survFit")
-  return(OUT)
-}
+#' @rdname survFit
+#'
+#' @return The function returns an object of class \code{survFitVarExp}, which is
+#' a list with the following information:
+#' \item{estim.par}{a table of the estimated parameters as medians and 95\%
+#' credible intervals}
+#' \item{mcmc}{an object of class \code{mcmc.list} with the posterior
+#' distribution}
+#' \item{model}{a JAGS model object}
+#' \item{dic}{return the Deviance Information Criterion (DIC) if \code{dic.compute} is \code{TRUE}}
+#' \item{warnings}{a table with warning messages}
+#' \item{parameters}{a list of parameter names used in the model}
+#' \item{n.chains}{an integer value corresponding to the number of chains used
+#' for the MCMC computation}
+#' \item{mcmcInfo}{a table with the number of iterations, chains, adaptation, warmup and the thinning interval} 
+#' \item{jags.data}{a list of the data passed to the JAGS model}
+#' \item{model_type}{the type of TKTD model used: \code{SD} or \code{IT}}
+#' 
+#' 
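+#' @examples
+#'
+#' # A sketch with the time-variable (pulsed) exposure dataset shipped with
+#' # the package:
+#' data("propiconazole_pulse_exposure")
+#' dataset <- survData(propiconazole_pulse_exposure)
+#' \donttest{
+#' out <- survFit(dataset, model_type = "SD")
+#' summary(out)
+#' # Plot the fitted curve with CI as spaghetti
+#' plot(out, spaghetti = TRUE)
+#' }
+#'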
+#' @import rjags
+#' @importFrom dplyr group_by filter mutate
+#' 
+#' @export
+survFit.survDataVarExp <- function(data,
+                                 model_type = NULL,
+                                 quiet = FALSE,
+                                 n.chains = 3,
+                                 n.adapt = 1000,
+                                 n.iter = NULL,
+                                 n.warmup = NULL,
+                                 thin.interval = NULL,
+                                 limit.sampling = TRUE,
+                                 dic.compute = FALSE,
+                                 dic.type = "pD",
+                                 hb_value = TRUE,
+                                 hb_valueFIXED = NA,
+                                 extend_time = 100,
+                                 ...){
+  
+  ##
+  ## Pre-modelling measures and tests
+  ##
+
+  ### ensure model_type is one of "SD" or "IT"
+  if (is.null(model_type) || ! (model_type %in% c("SD","IT"))) {
+    stop("You need to specify a 'model_type', either 'SD' or 'IT'")
+  }
+  ### check the number of chains required for the diagnostic procedure
+  if (n.chains < 2) {
+    stop('2 or more parallel chains required')
+  }
+  ### warning message when hb_value is FALSE
+  if (hb_value==FALSE){
+    warning("This is not an error message: the parameter 'hb' is fixed. This means that the correlation between
+            'hb' and other parameters is ignored.")
+    ### Set default hb_valueFIXED
+    if(is.na(hb_valueFIXED)){
+      hb_valueFIXED = 0
+    }
+  }
+
+  ##
+  ## Data and Priors for model
+  ##
+  
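+  # 'extend_time' sets how many interpolation points per replicate are used to
+  # linearly interpolate the exposure profile before it is passed to JAGS.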
+  globalData <- modelData(x = data, model_type = model_type, extend_time = extend_time)
+  
+  ### Remove the replicate information since it is not used in JAGS, and a warning message would otherwise be shown
+  
+  jags.data <- globalData$modelData
+  
+  jags.data_fit <- jags.data
+  
+  jags.data_fit$replicate <- NULL
+  jags.data_fit$conc <- NULL
+  jags.data_fit$replicate_long <- NULL
+  
+  priorsData = globalData$priorsMinMax
+  
+  ##
+  ## Define model
+  ##
+  
+  if(model_type == "SD"){
+    
+    jags.data_fit$time = NULL # remove jags.data_fit$time for varSD model
+    
+    if(hb_value == TRUE){
+      ### Determine sampling parameters
+      jags.data_fit$hb_value = 1
+      jags.data_fit$hb_valueFIXED = -1 # just to have it in JAGS
+      parameters_sampling <- c("kd_log10", "hb_log10", "z_log10", "kk_log10")
+      parameters <- c("kd_log10", "hb_log10", "z_log10", "kk_log10", "hb", "psurv", "Nsurv_ppc", "Nsurv_sim")
+    } else{
+      ### Determine sampling parameters
+      jags.data_fit$hb_value = 0
+      jags.data_fit$hb_valueFIXED = hb_valueFIXED
+      parameters_sampling <- c("kd_log10", "z_log10", "kk_log10")
+      parameters <- c("kd_log10", "z_log10", "kk_log10", "hb", "psurv", "Nsurv_ppc", "Nsurv_sim")
+    }
+    file_to_use <- jags_TKTD_varSD
+    
+      
+  } else if(model_type == "IT"){
+    ### Determine sampling parameters
+    
+    if(hb_value == TRUE){
+      jags.data_fit$hb_value = 1
+      jags.data_fit$hb_valueFIXED = -1 # just to have it in JAGS
+      parameters_sampling <- c("kd_log10", "hb_log10","alpha_log10", "beta_log10")
+      parameters <- c("kd_log10", "hb_log10","alpha_log10", "beta_log10", "hb", "psurv", "Nsurv_ppc", "Nsurv_sim")
+    } else{
+      jags.data_fit$hb_value = 0
+      jags.data_fit$hb_valueFIXED = hb_valueFIXED
+      parameters_sampling <- c("kd_log10","alpha_log10", "beta_log10")
+      parameters <- c("kd_log10","alpha_log10", "beta_log10", "psurv", "hb", "Nsurv_ppc", "Nsurv_sim")
+    }
+    file_to_use <- jags_TKTD_varIT
+
+  }
+
+  model <- survLoadModel(model.program = file_to_use,
+                         data = jags.data_fit,
+                         n.chains = n.chains,
+                         Nadapt = n.adapt,
+                         quiet = quiet)
+
+  
+  ##
+  ## estimate the number of iterations required for convergence of the chains
+  ## by using the raftery.diag diagnostic
+  ##
+  
+  if(is.null(n.warmup) | is.null(thin.interval) | is.null(n.iter)){
+    
+    sampling.parameters <- modelSamplingParameters(model,
+                                                   parameters_sampling,
+                                                   n.chains = n.chains, quiet = quiet)
+    if (sampling.parameters$niter > 5e5)
+      stop("The model needs too many iterations to provide reliable parameter estimates !")
+    
+    n.warmup = sampling.parameters$burnin
+    thin.interval = sampling.parameters$thin
+    n.iter = sampling.parameters$niter
+    
+  }
+
+  ### update the model: warmup (burn-in) phase
+  update(model, n.warmup)
+  
+  if(dic.compute == TRUE){ # Deviance Information Criterion
+    dic <- dic.samples(model,
+                       n.iter = n.iter,
+                       thin = thin.interval,
+                       type = dic.type) 
+  } else dic = NULL
+  
+  mcmc =  coda.samples(model,
+                       variable.names = parameters,
+                       n.iter = n.iter,
+                       thin = thin.interval)
+  
+  ##
+  ## Checking posterior ranges against the experimental design:
+  ##
+  
+  estim.par <- survFit_TKTD_params(mcmc, model_type = model_type, hb_value = hb_value)
+  
+  warnings <- msgTableCreate()
+  
+  if (filter(estim.par, parameters == "kd")$Q97.5 > priorsData$kd_max){
+    ## store warning in warnings table
+    msg <- "The estimation of the dominant rate constant (model parameter kd) lies 
+    outside the range used to define its prior distribution which indicates that this
+    rate is very high and difficult to estimate from this experiment !"
+    warnings <- msgTableAdd(warnings, "kd_outRange", msg)
+    ## print the message
+    warning(msg, call. = FALSE)
+  }
+  
+  if(hb_value == TRUE){
+    if (filter(estim.par, parameters == "hb")$Q2.5 < priorsData$hb_min){
+      ## store warning in warnings table
+      msg <- "The estimation of the natural instantaneous mortality rate (model 
+    parameter hb) lies outside the range used to define its prior distribution 
+    which indicates that this rate is very low and so difficult to estimate 
+    from this experiment !"
+      warnings <- msgTableAdd(warnings, "hb_outRange", msg)
+      ## print the message
+      warning(msg, call. = FALSE)
+    }
+  }
+
+  ### for SD model
+  if(model_type == "SD"){
+    if (filter(estim.par, parameters == "kk")$Q97.5 > priorsData$kk_max){
+      ## store warning in warnings table
+      msg <- "The estimation of the killing rate (model parameter kk) lies
+      outside the range used to define its prior distribution which indicates
+      that this rate is very high and difficult to estimate from this experiment !"
+      warnings <- msgTableAdd(warnings, "kk_outRange", msg)
+      ## print the message
+      warning(msg, call. = FALSE)
+    }
+    
+    if (filter(estim.par, parameters == "z")$Q2.5 < priorsData$conc_min ||
+        filter(estim.par, parameters == "z")$Q97.5 > priorsData$conc_max){
+      ## store warning in warnings table
+      msg <- "The estimation of Non Effect Concentration threshold (NEC) 
+      (model parameter z) lies outside the range of tested concentration 
+      and may be unreliable as the prior distribution on this parameter is
+      defined from this range !"
+      warnings <- msgTableAdd(warnings, "z_outRange", msg)
+      ## print the message
+      warning(msg, call. = FALSE)
+    }
+  }
+  
+  ### for IT model
+  if(model_type == "IT"){
+    
+    if (filter(estim.par, parameters == "alpha")$Q2.5 < priorsData$conc_min ||
+        filter(estim.par, parameters == "alpha")$Q97.5 > priorsData$conc_max){
+      ## store warning in warnings table
+      msg <- "The estimation of log-logistic median (model parameter alpha) 
+      lies outside the range of tested concentration and may be unreliable as 
+      the prior distribution on this parameter is defined from this range !"
+      warnings <- msgTableAdd(warnings, "alpha_outRange", msg)
+      ## print the message
+      warning(msg, call. = FALSE)
+    }
+  }
+  
+  ##
+  ## MCMC information
+  ## 
+  mcmcInfo = data.frame(n.iter = n.iter,
+                        n.chains = n.chains,
+                        n.adapt = n.adapt,
+                        thin.interval = thin.interval,
+                        n.warmup = n.warmup)
+  
+
+  ##
+  ## Transformed data (initial number of individuals per replicate)
+  ##
+  transformed.data <- data.frame(
+    replicate = jags.data$replicate,
+    time = jags.data$time,
+    conc = jags.data$conc,
+    Nsurv = jags.data$Nsurv
+  ) %>%
+    group_by(replicate) %>%
+    mutate(Ninit = max(Nsurv, na.rm = TRUE))
+  
+  ##
+  ## OUTPUT
+  ##
+  
+  OUT <- list(estim.par = estim.par,
+              mcmc = mcmc,
+              model = model,
+              dic = dic,
+              parameters = parameters,
+              mcmcInfo = mcmcInfo,
+              jags.data = jags.data,
+              warnings = warnings,
+              model_type = model_type,
+              transformed.data = transformed.data,
+              original.data = data,
+              hb_valueFIXED = hb_valueFIXED)
+  
+  class(OUT) <- c("survFitVarExp","survFit")
+  return(OUT)
+}
diff --git a/R/survFitTKTD.R b/R/survFitTKTD.R
index f88d962ed4f27b8d04bb1b530000f00d61a4a088..af7a91ab271901e655bcdfc3f0331028a44d8519 100644
--- a/R/survFitTKTD.R
+++ b/R/survFitTKTD.R
@@ -1,333 +1,309 @@
-#' @importFrom dplyr filter
-survTKTDCreateJagsData <- function(data, comp) {
-  # Creates the parameters to define the prior of the TKTD model
-  # INPUTS
-  # data : object of class survData
-  # comp : if true return only min and max of prior
-  # OUTPUT
-  # jags.data : list of data required for the jags.model function
-
-  data <- data[data$time != 0, ]
-
-  # Parameter calculation of concentration min and max
-  concmin <- min(data$conc[data$conc != 0])
-  concmax <- max(data$conc)
-
-  tmin <- min(data$time)
-  tmax <- max(data$time)
-  conc <- sort(unique(data$conc))
-
-  deltaCmin = NULL
-  for (i in 2:length(conc)) {
-    deltaCmin[i - 1] <- conc[i] - conc[i - 1]
-  }
-  deltaCmin <- min(deltaCmin)
-
-  # ks parameters
-  ksmax <- -log(0.001) / (tmin * deltaCmin)
-  ksmin <- -log(0.999) / (tmax * (concmax - concmin))
-
-  meanlog10ks <- (log10(ksmax) + log10(ksmin)) / 2
-  sdlog10ks <- (log10(ksmax) - log10(ksmin)) / 4
-  taulog10ks <- 1 / sdlog10ks^2
-
-  # kd parameters
-  kdmax <- -log(0.001) / tmin
-  kdmin <- -log(0.999) / tmax
-
-  meanlog10kd <- (log10(kdmax) + log10(kdmin)) / 2
-
-  sdlog10kd <- (log10(kdmax) - log10(kdmin)) / 4
-  taulog10kd <- 1 / sdlog10kd^2
-
-  # m0 parameters
-  m0max <- -log(0.5) / tmin
-  m0min <- -log(0.999) / tmax
-
-  meanlog10m0 <- (log10(m0max) + log10(m0min)) / 2
-  sdlog10m0 <- (log10(m0max) - log10(m0min)) / 4
-  taulog10m0 <- 1/ sdlog10m0^2
-
-  # nec parameters
-  meanlog10nec <- (log10(concmax) + log10(concmin))/2
-  sdlog10nec <- (log10(concmax) - log10(concmin)) / 4
-  taulog10nec <- 1/ sdlog10nec^2
-
-  if (!comp) {
-    return(list( x = data$conc, y = data$N_alive,
-                 t = data$time, tprec = data$tprec,
-                 Nprec = data$Nprec,
-                 meanlog10ks = meanlog10ks, taulog10ks = taulog10ks,
-                 meanlog10kd = meanlog10kd,
-                 taulog10kd = taulog10kd,
-                 meanlog10m0 = meanlog10m0,
-                 taulog10m0 = taulog10m0,
-                 meanlog10nec = meanlog10nec, taulog10nec = taulog10nec,
-                 ndat = length(data$conc),
-                 bigtime = max(data$time) + 10))
-  } else {
-    return(list(log10necmin = log10(concmin),
-                log10necmax = log10(concmax),
-                log10ksmin = log10(ksmin),
-                log10ksmax = log10(ksmax),
-                log10kdmin = log10(kdmin),
-                log10kdmax = log10(kdmax),
-                log10m0min = log10(m0min),
-                log10m0max = log10(m0max)))
-  }
-}
-
-modelTKTDNorm <- "model {
-#########priors
-log10ks ~ dnorm(meanlog10ks, taulog10ks)
-log10NEC ~ dnorm(meanlog10nec, taulog10nec)
-log10kd ~ dnorm(meanlog10kd, taulog10kd)
-log10m0 ~ dnorm(meanlog10m0, taulog10m0)
-
-#####parameter transformation
-ks <- 10**log10ks
-NEC <- 10**log10NEC
-kd <- 10**log10kd
-m0 <- 10**log10m0
-
-##########Computation of the likelihood
-for (i in 1:ndat)
-{
-  tNEC[i] <- ifelse(x[i] > NEC, -1/kd * log( 1- R[i]), bigtime)
-  R[i] <- ifelse(x[i] > NEC, NEC/xcor[i], 0.1)
-  xcor[i] <- ifelse(x[i] > 0, x[i], 10)
-  tref[i] <- max(tprec[i], tNEC[i])
-
-  psurv[i] <- exp(-m0 * (t[i] - tprec[i]) + ifelse(t[i] > tNEC[i], -ks * ((x[i] - NEC) * (t[i] - tref[i]) + x[i]/kd * ( exp(-kd * t[i]) - exp(-kd * tref[i]))), 0))
-
-  y[i] ~ dbin(psurv[i] , Nprec[i])
-}
-}"
-
-survTKTDPARAMS <- function(mcmc) {
-  # create the table of posterior estimated parameters
-  # for the survival analyses
-  # INPUT:
-  # - mcmc:  list of estimated parameters for the model with each item representing
-  # a chain
-  # OUTPUT:
-  # - data frame with 3 columns (values, CIinf, CIsup) and 3-4rows (the estimated
-  # parameters)
-
-  # Retrieving parameters of the model
-  res.M <- summary(mcmc)
-
-  kd <- 10^res.M$quantiles["log10kd", "50%"]
-  kdinf <- 10^res.M$quantiles["log10kd", "2.5%"]
-  kdsup <- 10^res.M$quantiles["log10kd", "97.5%"]
-
-  ks <- 10^res.M$quantiles["log10ks", "50%"]
-  ksinf <- 10^res.M$quantiles["log10ks", "2.5%"]
-  kssup <- 10^res.M$quantiles["log10ks", "97.5%"]
-  nec <- 10^res.M$quantiles["log10NEC", "50%"]
-  necinf <- 10^res.M$quantiles["log10NEC", "2.5%"]
-  necsup <- 10^res.M$quantiles["log10NEC", "97.5%"]
-
-  m0 <- 10^res.M$quantiles["log10m0", "50%"]
-  m0inf <- 10^res.M$quantiles["log10m0", "2.5%"]
-  m0sup <- 10^res.M$quantiles["log10m0", "97.5%"]
-
-  # Definition of the parameter storage and storage data
-
-  rownames <- c("kd", "ks", "nec", "m0")
-  params <- c(kd, ks, nec, m0)
-  CIinf <- c(kdinf, ksinf, necinf, m0inf)
-  CIsup <- c(kdsup, kssup, necsup, m0sup)
-
-  res <- data.frame(median = params, Q2.5 = CIinf, Q97.5 = CIsup,
-                    row.names = rownames)
-
-  return(res)
-}
-
-#' Fits a TKTD for survival analysis using Bayesian inference for \code{survDataTKTD} object
-#'
-#' This function estimates the parameters of a TKTD
-#' model for survival analysis using Bayesian inference. In this model,
-#' the survival rate of individuals is modeled as a function of the chemical compound
-#' concentration with a mechanistic description of the effects on survival over
-#' time.
-#' 
-#'
-#' @param data An object of class \code{survData}.
-#' @param n.chains Number of MCMC chains. The minimum required number of chains
-#' is 2.
-#' @param quiet If \code{FALSE}, prints logs and progress bar from JAGS.
-#'
-#' @return The function returns an object of class \code{survFitTKTD}, which is
-#' a list with the following information:
-#' \item{estim.par}{a table of the estimated parameters as medians and 95\%
-#' credible intervals}
-#' \item{mcmc}{an object of class \code{mcmc.list} with the posterior
-#' distribution}
-#' \item{warnings}{a table with warning messages}
-#' \item{model}{a JAGS model object}
-#' \item{parameters}{a list of parameter names used in the model}
-#' \item{n.chains}{an integer value corresponding to the number of chains used
-#' for the MCMC computation}
-#' \item{n.iter}{a list of two indices indicating the beginning and the end of
-#' monitored iterations}
-#' \item{n.thin}{a numerical value corresponding to the thinning interval}
-#' \item{jags.data}{a list of data passed to the JAGS model}
-#' 
-#' @references
-#' 
-#' Delignette-Muller ML, Ruiz P and Veber P (2017).
-#' \emph{Robust fit of toxicokinetic-toxicodynamic models using prior knowledge contained in the design of survival toxicity tests.}
-#' 
-#' Bedaux, J., Kooijman, SALM (1994) Statistical analysis of toxicity tests,
-#' based on hazard modeling, \emph{Environmental and Ecological Statistics}, 1,
-#' 303-314.
-#' 
-#' 
-#' @keywords estimation
-#
-#' @examples
-#'
-#' # (1) Load the survival data
-#' data(propiconazole)
-#'
-#' # (2) Create an object of class "survData"
-#' dataset <- survData(propiconazole)
-#'
-#' \donttest{
-#' # (3) Run the survFitTKTD function
-#' out <- survFitTKTD(dataset)
-#'
-#' # (4) Summarize look the estimated parameters
-#' summary(out)
-#'
-#' # (5) Plot the fitted curve
-#' plot(out, adddata = TRUE)
-#'
-#' # (6) Plot the fitted curve with ggplot style and CI as spaghetti
-#' plot(out, spaghetti = TRUE , adddata = TRUE,
-#'      style = "ggplot")
-#' }
-#'
-#' @export
-#' @import rjags
-#' @importFrom dplyr group_by summarise filter
-#'
-survFitTKTD <- function(data,
-                        n.chains = 3,
-                        quiet = FALSE) {
-  # test class object
-  if(!is(data, "survData"))
-    stop("survFitTKTD: object of class survData expected")
-
-  # data transformation
-  data <- summarise(group_by(data, conc, time), N_alive = sum(Nsurv))
-
-  n <- nrow(data)
-  data$tprec <- NA
-  data$Nprec <- NA
-  data$N_init <- NA
-  for (i in 1:n)
-  {
-    if (data$time[i] != 0)
-    {
-      data$tprec[i] <- data$time[i - 1]
-      data$Nprec[i] <- data$N_alive[i - 1]
-      data$N_init[i] <- data$N_alive[data$conc == data$conc[i] & data$time == 0]
-    }
-  }
-
-  # control
-  datasurv0 <- subset(data, time == min(data$time[data$time != 0]))
-  datasurv0$time <- 0
-  datasurv0$N_alive <- datasurv0$N_init
-  data[is.na(data$tprec),
-       c("tprec", "Nprec", "N_init")] <- datasurv0[, c("tprec", "Nprec", "N_init")]
-
-  jags.data <- survTKTDCreateJagsData(data, FALSE)
-
-  # Define model
-
-  model <- survLoadModel(model.program = modelTKTDNorm,
-                         data = jags.data,
-                         n.chains,
-                         Nadapt = 3000, quiet)
-
-  # Determine sampling parameters
-  parameters <- c("log10kd", "log10NEC","log10ks", "log10m0")
-
-  sampling.parameters <- modelSamplingParameters(model,
-                                                 parameters, n.chains, quiet)
-
-  if (sampling.parameters$niter > 200000)
-    stop("The model needs too many iterations to provide reliable parameter estimates !")
-
-  # Sampling
-  prog.b <- ifelse(quiet == TRUE, "none", "text")
-
-  mcmc <- coda.samples(model, parameters,
-                       n.iter = sampling.parameters$niter,
-                       thin = sampling.parameters$thin,
-                       progress.bar = prog.b)
-
-  # summarize estime.par et CIs
-  # calculate from the estimated parameters
-  estim.par <- survTKTDPARAMS(mcmc)
-
-  # check the posterior range
-  priorBonds <- survTKTDCreateJagsData(data, TRUE)
-
-  warnings <- msgTableCreate()
-
-  if (log10(estim.par["ks", "Q97.5"]) > priorBonds$log10ksmax){
-    ##store warning in warnings table
-    msg <- "The estimation of the killing rate (model parameter ks) lies outside the range used to define its prior distribution which indicates that this rate is very high and difficult to estimate from this experiment !"
-    warnings <- msgTableAdd(warnings, "ks_outRange", msg)
-    ## print the message
-    warning(msg, call. = FALSE)
-  }
-
-  if (log10(estim.par["kd", "Q97.5"]) > priorBonds$log10kdmax){
-    ##store warning in warnings table
-    msg <- "The estimation of the dominant rate constant (model parameter kd) lies outside the range used to define its prior distribution which indicates that this rate is very high and difficult to estimate from this experiment !"
-    warnings <- msgTableAdd(warnings, "kd_outRange", msg)
-    ## print the message
-    warning(msg, call. = FALSE)
-  }
-
-
-  if (log10(estim.par["m0", "Q2.5"]) < priorBonds$log10m0min){
-    ##store warning in warnings table
-    msg <- "The estimation of the natural instantaneous mortality rate (model parameter m0) lies outside the range used to define its prior distribution which indicates that this rate is very low and so difficult to estimate from this experiment !"
-    warnings <- msgTableAdd(warnings, "hb_outRange", msg)
-    ## print the message
-    warning(msg, call. = FALSE)
-
-  }
-
-  if (log10(estim.par["nec", "Q2.5"]) < priorBonds$log10necmin ||
-      log10(estim.par["nec", "Q97.5"]) > priorBonds$log10necmax){
-    ##store warning in warnings table
-    msg <- "The NEC estimation (model parameter nec) lies outside the range of tested concentration and may be unreliable as the prior distribution on this parameter is defined from this range !"
-    warnings <- msgTableAdd(warnings, "nec_outRange", msg)
-    ## print the message
-    warning(msg, call. = FALSE)
-  }
-
-  #OUTPUT
-  OUT <- list(estim.par = estim.par,
-              mcmc = mcmc,
-              model = model,
-              parameters = parameters,
-              n.chains = summary(mcmc)$nchain,
-              n.iter = list(start = summary(mcmc)$start,
-                            end = summary(mcmc)$end),
-              warnings = warnings,
-              n.thin = summary(mcmc)$thin,
-              jags.data = jags.data,
-              transformed.data = data)
-
-  class(OUT) <- "survFitTKTD"
-  return(OUT)
-}
+#' @importFrom dplyr filter
+survTKTDCreateJagsData <- function(data, comp) {
+  # Creates the parameters that define the priors of the TKTD model
+  # INPUTS
+  # data: object of class survData
+  # comp: if TRUE, return only the min and max bounds of the priors
+  # OUTPUT
+  # jags.data: list of data required by the jags.model function
+
+  data <- data[data$time != 0, ]
+
+  # smallest non-zero and largest tested concentrations
+  concmin <- min(data$conc[data$conc != 0])
+  concmax <- max(data$conc)
+
+  tmin <- min(data$time)
+  tmax <- max(data$time)
+  conc <- sort(unique(data$conc))
+
+  deltaCmin <- min(diff(conc))
+
+  # ks parameters
+  ksmax <- -log(0.001) / (tmin * deltaCmin)
+  ksmin <- -log(0.999) / (tmax * (concmax - concmin))
+
+  meanlog10ks <- (log10(ksmax) + log10(ksmin)) / 2
+  sdlog10ks <- (log10(ksmax) - log10(ksmin)) / 4
+  taulog10ks <- 1 / sdlog10ks^2
+
+  # kd parameters
+  kdmax <- -log(0.001) / tmin
+  kdmin <- -log(0.999) / tmax
+
+  meanlog10kd <- (log10(kdmax) + log10(kdmin)) / 2
+  sdlog10kd <- (log10(kdmax) - log10(kdmin)) / 4
+  taulog10kd <- 1 / sdlog10kd^2
+
+  # m0 parameters
+  m0max <- -log(0.5) / tmin
+  m0min <- -log(0.999) / tmax
+
+  meanlog10m0 <- (log10(m0max) + log10(m0min)) / 2
+  sdlog10m0 <- (log10(m0max) - log10(m0min)) / 4
+  taulog10m0 <- 1 / sdlog10m0^2
+
+  # nec parameters
+  meanlog10nec <- (log10(concmax) + log10(concmin)) / 2
+  sdlog10nec <- (log10(concmax) - log10(concmin)) / 4
+  taulog10nec <- 1 / sdlog10nec^2
+
+  if (!comp) {
+    return(list(x = data$conc, y = data$N_alive,
+                 t = data$time, tprec = data$tprec,
+                 Nprec = data$Nprec,
+                 meanlog10ks = meanlog10ks, taulog10ks = taulog10ks,
+                 meanlog10kd = meanlog10kd,
+                 taulog10kd = taulog10kd,
+                 meanlog10m0 = meanlog10m0,
+                 taulog10m0 = taulog10m0,
+                 meanlog10nec = meanlog10nec, taulog10nec = taulog10nec,
+                 ndat = length(data$conc),
+                 bigtime = max(data$time) + 10))
+  } else {
+    return(list(log10necmin = log10(concmin),
+                log10necmax = log10(concmax),
+                log10ksmin = log10(ksmin),
+                log10ksmax = log10(ksmax),
+                log10kdmin = log10(kdmin),
+                log10kdmax = log10(kdmax),
+                log10m0min = log10(m0min),
+                log10m0max = log10(m0max)))
+  }
+}
+
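+# A hedged numeric sketch of the prior bounds computed above (hypothetical
+# toy design, not taken from the package data): with observation times 1:4
+# (days) and tested concentrations c(0, 2, 4, 8), deltaCmin = 2, so
+#   ksmax <- -log(0.001) / (1 * 2)        # ~ 3.45
+#   ksmin <- -log(0.999) / (4 * (8 - 2))  # ~ 4.2e-05
+# and log10(ks) gets a normal prior centred on the midpoint of
+# [log10(ksmin), log10(ksmax)], that interval spanning +/- 2 sd.
+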
+modelTKTDNorm <- "model {
+# priors
+log10ks ~ dnorm(meanlog10ks, taulog10ks)
+log10NEC ~ dnorm(meanlog10nec, taulog10nec)
+log10kd ~ dnorm(meanlog10kd, taulog10kd)
+log10m0 ~ dnorm(meanlog10m0, taulog10m0)
+
+# parameter transformation
+ks <- 10**log10ks
+NEC <- 10**log10NEC
+kd <- 10**log10kd
+m0 <- 10**log10m0
+
+# computation of the likelihood
+for (i in 1:ndat)
+{
+  tNEC[i] <- ifelse(x[i] > NEC, -1/kd * log(1 - R[i]), bigtime)
+  R[i] <- ifelse(x[i] > NEC, NEC/xcor[i], 0.1)
+  xcor[i] <- ifelse(x[i] > 0, x[i], 10)
+  tref[i] <- max(tprec[i], tNEC[i])
+
+  psurv[i] <- exp(-m0 * (t[i] - tprec[i]) + ifelse(t[i] > tNEC[i], -ks * ((x[i] - NEC) * (t[i] - tref[i]) + x[i]/kd * (exp(-kd * t[i]) - exp(-kd * tref[i]))), 0))
+
+  y[i] ~ dbin(psurv[i], Nprec[i])
+}
+}"
+
+survTKTDPARAMS <- function(mcmc) {
+  # Creates the table of posterior parameter estimates
+  # for the survival analysis
+  # INPUT:
+  # - mcmc: list of estimated parameters for the model, each item representing
+  #   a chain
+  # OUTPUT:
+  # - data frame with 3 columns (median, Q2.5, Q97.5) and 4 rows (the
+  #   estimated parameters kd, ks, nec and m0)
+
+  # Retrieving parameters of the model
+  res.M <- summary(mcmc)
+
+  kd <- 10^res.M$quantiles["log10kd", "50%"]
+  kdinf <- 10^res.M$quantiles["log10kd", "2.5%"]
+  kdsup <- 10^res.M$quantiles["log10kd", "97.5%"]
+
+  ks <- 10^res.M$quantiles["log10ks", "50%"]
+  ksinf <- 10^res.M$quantiles["log10ks", "2.5%"]
+  kssup <- 10^res.M$quantiles["log10ks", "97.5%"]
+  nec <- 10^res.M$quantiles["log10NEC", "50%"]
+  necinf <- 10^res.M$quantiles["log10NEC", "2.5%"]
+  necsup <- 10^res.M$quantiles["log10NEC", "97.5%"]
+
+  m0 <- 10^res.M$quantiles["log10m0", "50%"]
+  m0inf <- 10^res.M$quantiles["log10m0", "2.5%"]
+  m0sup <- 10^res.M$quantiles["log10m0", "97.5%"]
+
+  # Store the estimates and their credible interval bounds
+
+  rownames <- c("kd", "ks", "nec", "m0")
+  params <- c(kd, ks, nec, m0)
+  CIinf <- c(kdinf, ksinf, necinf, m0inf)
+  CIsup <- c(kdsup, kssup, necsup, m0sup)
+
+  res <- data.frame(median = params, Q2.5 = CIinf, Q97.5 = CIsup,
+                    row.names = rownames)
+
+  return(res)
+}
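+
+# Shape sketch of the returned table (hypothetical call, assuming 'mcmc' is
+# the mcmc.list produced by survFitTKTD below):
+#   estim <- survTKTDPARAMS(mcmc)
+#   estim["nec", c("Q2.5", "Q97.5")]  # 95% credible interval of the NEC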
+
+#' Fits a TKTD model for survival analysis using Bayesian inference on a \code{survData} object
+#'
+#' This function estimates the parameters of a TKTD
+#' model for survival analysis using Bayesian inference. In this model,
+#' the survival rate of individuals is modeled as a function of the chemical
+#' compound concentration, with a mechanistic description of the effects on
+#' survival over time.
+#'
+#' @param data An object of class \code{survData}.
+#' @param n.chains Number of MCMC chains. The minimum required number of chains
+#' is 2.
+#' @param quiet If \code{FALSE}, prints logs and progress bar from JAGS.
+#'
+#' @return The function returns an object of class \code{survFitTKTD}, which is
+#' a list with the following information:
+#' \item{estim.par}{a table of the estimated parameters as medians and 95\%
+#' credible intervals}
+#' \item{mcmc}{an object of class \code{mcmc.list} with the posterior
+#' distribution}
+#' \item{warnings}{a table with warning messages}
+#' \item{model}{a JAGS model object}
+#' \item{parameters}{a list of parameter names used in the model}
+#' \item{n.chains}{an integer value corresponding to the number of chains used
+#' for the MCMC computation}
+#' \item{n.iter}{a list of two indices indicating the beginning and the end of
+#' monitored iterations}
+#' \item{n.thin}{a numerical value corresponding to the thinning interval}
+#' \item{jags.data}{a list of data passed to the JAGS model}
+#' 
+#' @references
+#' 
+#' Delignette-Muller ML, Ruiz P and Veber P (2017).
+#' \emph{Robust fit of toxicokinetic-toxicodynamic models using prior knowledge contained in the design of survival toxicity tests.}
+#' 
+#' Bedaux JJM and Kooijman SALM (1994).
+#' \emph{Statistical analysis of toxicity tests, based on hazard modeling.}
+#' Environmental and Ecological Statistics, 1, 303-314.
+#' 
+#' @keywords estimation
+#'
+#' @export
+#' @import rjags
+#' @importFrom dplyr group_by summarise filter
+#'
+survFitTKTD <- function(data,
+                        n.chains = 3,
+                        quiet = FALSE) {
+  # check the class of the object
+  if (!is(data, "survData"))
+    stop("survFitTKTD: object of class survData expected")
+
+  # data transformation: pool replicates by concentration and time
+  data <- summarise(group_by(data, conc, time), N_alive = sum(Nsurv))
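+  # (e.g., two replicates observed at the same (conc, time) with Nsurv = 9
+  # and Nsurv = 8 collapse into a single row with N_alive = 17)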
+
+  n <- nrow(data)
+  data$tprec <- NA
+  data$Nprec <- NA
+  data$N_init <- NA
+  for (i in 1:n)
+  {
+    if (data$time[i] != 0)
+    {
+      data$tprec[i] <- data$time[i - 1]
+      data$Nprec[i] <- data$N_alive[i - 1]
+      data$N_init[i] <- data$N_alive[data$conc == data$conc[i] & data$time == 0]
+    }
+  }
+
+  # fill the time-0 rows (left NA by the loop above): tprec = 0 and
+  # Nprec = N_init, taken from the rows built for the first non-zero time point
+  datasurv0 <- subset(data, time == min(data$time[data$time != 0]))
+  datasurv0$time <- 0
+  datasurv0$N_alive <- datasurv0$N_init
+  data[is.na(data$tprec),
+       c("tprec", "Nprec", "N_init")] <- datasurv0[, c("tprec", "Nprec", "N_init")]
+
+  jags.data <- survTKTDCreateJagsData(data, FALSE)
+
+  # Define model
+
+  model <- survLoadModel(model.program = modelTKTDNorm,
+                         data = jags.data,
+                         n.chains,
+                         Nadapt = 3000, quiet)
+
+  # Determine sampling parameters
+  parameters <- c("log10kd", "log10NEC", "log10ks", "log10m0")
+
+  sampling.parameters <- modelSamplingParameters(model,
+                                                 parameters, n.chains, quiet)
+
+  if (sampling.parameters$niter > 200000)
+    stop("The model needs too many iterations to provide reliable parameter estimates!")
+
+  # Sampling
+  prog.b <- ifelse(quiet, "none", "text")
+
+  mcmc <- coda.samples(model, parameters,
+                       n.iter = sampling.parameters$niter,
+                       thin = sampling.parameters$thin,
+                       progress.bar = prog.b)
+
+  # summarize the estimated parameters and their credible intervals
+  estim.par <- survTKTDPARAMS(mcmc)
+
+  # check the posterior range against the prior bounds
+  priorBounds <- survTKTDCreateJagsData(data, TRUE)
+
+  warnings <- msgTableCreate()
+
+  if (log10(estim.par["ks", "Q97.5"]) > priorBonds$log10ksmax){
+    ##store warning in warnings table
+    msg <- "The estimation of the killing rate (model parameter ks) lies outside the range used to define its prior distribution which indicates that this rate is very high and difficult to estimate from this experiment !"
+    warnings <- msgTableAdd(warnings, "ks_outRange", msg)
+    ## print the message
+    warning(msg, call. = FALSE)
+  }
+
+  if (log10(estim.par["kd", "Q97.5"]) > priorBonds$log10kdmax){
+    ##store warning in warnings table
+    msg <- "The estimation of the dominant rate constant (model parameter kd) lies outside the range used to define its prior distribution which indicates that this rate is very high and difficult to estimate from this experiment !"
+    warnings <- msgTableAdd(warnings, "kd_outRange", msg)
+    ## print the message
+    warning(msg, call. = FALSE)
+  }
+
+  if (log10(estim.par["m0", "Q2.5"]) < priorBounds$log10m0min){
+    ## store the warning in the warnings table
+    msg <- "The estimate of the natural instantaneous mortality rate (model parameter m0) lies outside the range used to define its prior distribution, which indicates that this rate is very low and thus difficult to estimate from this experiment!"
+    warnings <- msgTableAdd(warnings, "hb_outRange", msg)
+    ## print the message
+    warning(msg, call. = FALSE)
+  }
+
+  if (log10(estim.par["nec", "Q2.5"]) < priorBonds$log10necmin ||
+      log10(estim.par["nec", "Q97.5"]) > priorBonds$log10necmax){
+    ##store warning in warnings table
+    msg <- "The NEC estimation (model parameter nec) lies outside the range of tested concentration and may be unreliable as the prior distribution on this parameter is defined from this range !"
+    warnings <- msgTableAdd(warnings, "nec_outRange", msg)
+    ## print the message
+    warning(msg, call. = FALSE)
+  }
+
+  # output
+  OUT <- list(estim.par = estim.par,
+              mcmc = mcmc,
+              model = model,
+              parameters = parameters,
+              n.chains = summary(mcmc)$nchain,
+              n.iter = list(start = summary(mcmc)$start,
+                            end = summary(mcmc)$end),
+              warnings = warnings,
+              n.thin = summary(mcmc)$thin,
+              jags.data = jags.data,
+              transformed.data = data)
+
+  class(OUT) <- "survFitTKTD"
+  return(OUT)
+}
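+
+# A usage sketch, mirroring the example removed from the roxygen block in
+# this change ('propiconazole' is a dataset provided by the package):
+#   dat <- survData(propiconazole)
+#   out <- survFitTKTD(dat)
+#   summary(out)
+#   plot(out, adddata = TRUE)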
diff --git a/R/survFitTT.survDataCstExp.R b/R/survFitTT.survDataCstExp.R
index 66026bc0f354ec8c1c77aab8a2f5c53f7b8c315d..98822bf73c61e8b63c575a75824534113e3f6ba5 100644
--- a/R/survFitTT.survDataCstExp.R
+++ b/R/survFitTT.survDataCstExp.R
@@ -1,266 +1,251 @@
-#' Fits a Bayesian concentration-response model for target-time survival analysis
-#'
-#' This function estimates the parameters of an concentration-response
-#' model for target-time survival analysis using Bayesian inference. In this model,
-#' the survival rate of individuals at a given time point (called target time) is modeled
-#' as a function of the chemical compound concentration. The actual number of
-#' surviving individuals is then modeled as a stochastic function of the survival
-#' rate. Details of the model are presented in the
-#' vignette accompanying the package.
-#'
-#' The function returns
-#' parameter estimates of the concentration-response model and estimates of the so-called
-#' \eqn{LC_x}, that is the concentration of chemical compound required to get an \eqn{(1 - x/100)} survival rate.
-#'
-#' @param data an object of class \code{survData}
-#' @param target.time the chosen endpoint to evaluate the effect of the chemical compound
-#' concentration, by default the last time point available for
-#' all concentrations
-#' @param lcx desired values of \eqn{x} (in percent) for which to compute
-#' \eqn{LC_x}.
-#' @param n.chains number of MCMC chains, the minimum required number of chains
-#' is 2
-#' @param quiet if \code{TRUE}, does not print messages and progress bars from
-#' JAGS
-#' @param \dots Further arguments to be passed to generic methods
-#'
-#' @return The function returns an object of class \code{survFitTT}, which is a
-#' list with the following information:
-#' \item{estim.LCx}{a table of the estimated \eqn{LC_x} along with their 95\%
-#' credible intervals}
-#' \item{estim.par}{a table of the estimated parameters (medians) and 95\%
-#' credible intervals}
-#' \item{det.part}{the name of the deterministic part of the used model}
-#' \item{mcmc}{an object of class \code{mcmc.list} with the posterior
-#' distribution}
-#' \item{warnings}{a table with warning messages}
-#' \item{model}{a JAGS model object}
-#' \item{parameters}{a list of parameter names used in the model}
-#' \item{n.chains}{an integer value corresponding to the number of chains used
-#' for the MCMC computation}
-#' \item{n.iter}{a list of two indices indicating the beginning and the end of
-#' monitored iterations}
-#' \item{n.thin}{a numerical value corresponding to the thinning interval}
-#' \item{jags.data}{a list of the data passed to the JAGS model}
-#' \item{transformed.data}{the \code{survData} object passed to the function}
-#' \item{dataTT}{the dataset with which the parameters are estimated}
-#'
-#' @keywords estimation
-#
-#' @examples
-#'
-#' # (1) Load the data
-#' data(cadmium1)
-#'
-#' # (2) Create an object of class "survData"
-#' dat <- survData(cadmium1)
-#'
-#' \donttest{
-#' # (3) Run the survFitTT function with the log-logistic
-#' #     binomial model
-#' out <- survFitTT(dat, lcx = c(5, 10, 15, 20, 30, 50, 80),
-#'                  quiet = TRUE)
-#' }
-#'
-#' @import rjags
-#' @importFrom dplyr filter
-#'
-#' @export
-survFitTT.survDataCstExp <- function(data,
-                                     target.time = NULL,
-                                     lcx = c(5, 10, 20, 50),
-                                     n.chains = 3,
-                                     quiet = FALSE,
-                                     ...) {
-  # test class object
-  if(! is(data, "survDataCstExp"))
-    stop("survFitTT: object of class survDataCstExp expected")
-
-  # select Data at target.time and pool replicates
-  dataTT <- selectDataTT(data, target.time)
-  
-  # Gather replicates according to time and conc
-  dataTT <- cbind(aggregate(cbind(Nsurv, Ninit) ~ time + conc, dataTT, sum), replicate = 1)
-
-
-  # Choose model by testing mortality in the control
-  control <- filter(dataTT, conc == 0)
-  det.part <-
-    if (any(control$Nsurv < control$Ninit)) "loglogisticbinom_3"
-  else "loglogisticbinom_2"
-
-  # select model text
-  if (det.part == "loglogisticbinom_2") {
-    model.text <- llbinom2.model.text
-  }
-  if (det.part == "loglogisticbinom_3") {
-    model.text <- llbinom3.model.text
-  }
-
-  # parameters
-  parameters <- if (det.part == "loglogisticbinom_2") {
-    c("log10b", "log10e")
-  } else {
-    if (det.part == "loglogisticbinom_3") {
-      c("log10b", "d", "log10e")}
-  }
-
-  # create priors parameters
-  jags.data <- survCreateJagsData(det.part, dataTT)
-
-  # Define model
-  model <- survLoadModel(model.program = model.text,
-                         data = jags.data, n.chains,
-                         Nadapt = 3000, quiet)
-
-  # Determine sampling parameters
-  sampling.parameters <- modelSamplingParameters(model,
-                                                 parameters, n.chains, quiet)
-
-  if (sampling.parameters$niter > 100000)
-    stop("The model needs too many iterations to provide reliable parameter estimates !")
-
-  # Sampling
-  prog.b <- ifelse(quiet == TRUE, "none", "text")
-
-  mcmc <- coda.samples(model, parameters,
-                       n.iter = sampling.parameters$niter,
-                       thin = sampling.parameters$thin,
-                       progress.bar = prog.b)
-
-  # summarize estime.par et CIs
-  # calculate from the estimated parameters
-  estim.par <- survPARAMS(mcmc, det.part)
-
-  # LCx calculation  estimated LCx and their CIs 95%
-  # vector of LCX
-  estim.LCx <- estimXCX(mcmc, lcx, "LC")
-
-  # check if estimated LC50 lies in the tested concentration range
-
-  warnings <- msgTableCreate()
-
-  LC50 <- log10(estim.par["e", "median"])
-  if (!(min(log10(data$conc)) < LC50 & LC50 < max(log10(data$conc)))){
-    ##store warning in warnings table
-    msg <- "The LC50 estimation (model parameter e) lies outside the range of tested concentrations and may be unreliable as the prior distribution on this parameter is defined from this range !"
-    warnings <- msgTableAdd(warnings, "LC50outRange", msg)
-    ## print the message
-    warning(msg, call. = FALSE)
-  }
-
-  # output
-  OUT <- list(estim.LCx = estim.LCx,
-              estim.par = estim.par,
-              det.part = det.part,
-              mcmc = mcmc,
-              warnings = warnings,
-              model = model,
-              parameters = parameters,
-              n.chains = summary(mcmc)$nchain,
-              n.iter = list(start = summary(mcmc)$start,
-                            end = summary(mcmc)$end),
-              n.thin = summary(mcmc)$thin,
-              jags.data = jags.data,
-              transformed.data = data,
-              dataTT = dataTT)
-
-  class(OUT) <- "survFitTT"
-  return(OUT)
-}
-
-survCreateJagsData <- function(det.part, data) {
-  # Creates the parameters to define the prior of the log-logistic binomial model
-  # INPUTS
-  # det.part: model name
-  # data: object of class survData
-  # OUTPUT
-  # jags.data : list of data required for the jags.model function
-
-  # Parameter calculation of concentration min and max
-  concmin <- min(sort(unique(data$conc))[-1])
-  concmax <- max(data$conc)
-
-  # Create prior parameters for the log logistic model
-
-  # Params to define e
-  meanlog10e <- (log10(concmin) + log10(concmax)) / 2
-  sdlog10e <- (log10(concmax) - log10(concmin)) / 4
-  taulog10e <- 1 / sdlog10e^2
-
-  # Params to define b
-  log10bmin <- -2
-  log10bmax <- 2
-
-  # list of data use by jags
-  jags.data <- list(meanlog10e = meanlog10e,
-                    Ninit = data$Ninit,
-                    Nsurv = data$Nsurv,
-                    taulog10e = taulog10e,
-                    log10bmin = log10bmin,
-                    log10bmax = log10bmax,
-                    n = length(data$conc),
-                    xconc = data$conc)
-
-  # list of data use by jags
-  if (det.part == "loglogisticbinom_3") {
-    jags.data <- c(jags.data,
-                   dmin = 0,
-                   dmax = 1)
-  }
-  return(jags.data)
-}
-
-survPARAMS <- function(mcmc, det.part) {
-  # create the table of posterior estimated parameters
-  # for the survival analyses
-  # INPUT:
-  # - mcmc:  list of estimated parameters for the model with each item representing
-  # a chain
-  # OUTPUT:
-  # - data frame with 3 columns (values, CIinf, CIsup) and 3-4rows (the estimated
-  # parameters)
-
-  # Retrieving parameters of the model
-  res.M <- summary(mcmc)
-
-  if (det.part ==  "loglogisticbinom_3") {
-    d <- res.M$quantiles["d", "50%"]
-    dinf <- res.M$quantiles["d", "2.5%"]
-    dsup <- res.M$quantiles["d", "97.5%"]
-  }
-  # for loglogisticbinom_2 and 3
-  b <- 10^res.M$quantiles["log10b", "50%"]
-  e <- 10^res.M$quantiles["log10e", "50%"]
-  binf <- 10^res.M$quantiles["log10b", "2.5%"]
-  einf <- 10^res.M$quantiles["log10e", "2.5%"]
-  bsup <- 10^res.M$quantiles["log10b", "97.5%"]
-  esup <- 10^res.M$quantiles["log10e", "97.5%"]
-
-  # Definition of the parameter storage and storage data
-  # If Poisson Model
-
-  if (det.part == "loglogisticbinom_3") {
-    # if mortality in control
-    rownames <- c("b", "d", "e")
-    params <- c(b, d, e)
-    CIinf <- c(binf, dinf, einf)
-    CIsup <- c(bsup, dsup, esup)
-  } else {
-    # if no mortality in control
-    # Definition of the parameter storage and storage data
-    rownames <- c("b", "e")
-    params <- c(b, e)
-    CIinf <- c(binf, einf)
-    CIsup <- c(bsup, esup)
-  }
-
-  res <- data.frame(median = params, Q2.5 = CIinf, Q97.5 = CIsup,
-                    row.names = rownames)
-
-  return(res)
-}
-
-llbinom3.model.text <- "\nmodel # Loglogistic binomial model with 3 parameters\n\t\t{\t\nfor (i in 1:n)\n{\np[i] <- d/ (1 + (xconc[i]/e)^b)\nNsurv[i]~ dbin(p[i], Ninit[i])\n}\n\n# specification of priors (may be changed if needed)\nd ~ dunif(dmin, dmax)\nlog10b ~ dunif(log10bmin, log10bmax)\nlog10e ~ dnorm(meanlog10e, taulog10e)\n\nb <- pow(10, log10b)\ne <- pow(10, log10e)\n}\n"
-
-llbinom2.model.text <- "\nmodel # Loglogistic binomial model with 2 parameters\n\t\t{\t\nfor (i in 1:n)\n{\np[i] <- 1/ (1 + (xconc[i]/e)^b)\nNsurv[i]~ dbin(p[i], Ninit[i])\n}\n\n# specification of priors (may be changed if needed)\nlog10b ~ dunif(log10bmin, log10bmax)\nlog10e ~ dnorm(meanlog10e, taulog10e)\n\nb <- pow(10, log10b)\ne <- pow(10, log10e)\n}\n"
+#' Fits a Bayesian concentration-response model for target-time survival analysis
+#'
+#' This function estimates the parameters of a concentration-response
+#' model for target-time survival analysis using Bayesian inference. In this model,
+#' the survival rate of individuals at a given time point (called target time) is modeled
+#' as a function of the chemical compound concentration. The actual number of
+#' surviving individuals is then modeled as a stochastic function of the survival
+#' rate. Details of the model are presented in the
+#' vignette accompanying the package.
+#'
+#' The function returns
+#' parameter estimates of the concentration-response model and estimates of the
+#' so-called \eqn{LC_x}, that is, the concentration of chemical compound leading
+#' to a survival rate of \eqn{1 - x/100}.
+#'
+#' @param data an object of class \code{survData}
+#' @param target.time the chosen endpoint to evaluate the effect of the chemical compound
+#' concentration, by default the last time point available for
+#' all concentrations
+#' @param lcx desired values of \eqn{x} (in percent) for which to compute
+#' \eqn{LC_x}.
+#' @param n.chains number of MCMC chains, the minimum required number of chains
+#' is 2
+#' @param quiet if \code{TRUE}, does not print messages and progress bars from
+#' JAGS
+#' @param \dots Further arguments to be passed to generic methods
+#'
+#' @return The function returns an object of class \code{survFitTT}, which is a
+#' list with the following information:
+#' \item{estim.LCx}{a table of the estimated \eqn{LC_x} along with their 95\%
+#' credible intervals}
+#' \item{estim.par}{a table of the estimated parameters (medians) and 95\%
+#' credible intervals}
+#' \item{det.part}{the name of the deterministic part of the used model}
+#' \item{mcmc}{an object of class \code{mcmc.list} with the posterior
+#' distribution}
+#' \item{warnings}{a table with warning messages}
+#' \item{model}{a JAGS model object}
+#' \item{parameters}{a list of parameter names used in the model}
+#' \item{n.chains}{an integer value corresponding to the number of chains used
+#' for the MCMC computation}
+#' \item{n.iter}{a list of two indices indicating the beginning and the end of
+#' monitored iterations}
+#' \item{n.thin}{a numerical value corresponding to the thinning interval}
+#' \item{jags.data}{a list of the data passed to the JAGS model}
+#' \item{transformed.data}{the \code{survData} object passed to the function}
+#' \item{dataTT}{the dataset with which the parameters are estimated}
+#'
+#' @keywords estimation
+#'
+#' @import rjags
+#' @importFrom dplyr filter
+#'
+#' @export
+survFitTT.survDataCstExp <- function(data,
+                                     target.time = NULL,
+                                     lcx = c(5, 10, 20, 50),
+                                     n.chains = 3,
+                                     quiet = FALSE,
+                                     ...) {
+  # check the class of the object
+  if (!is(data, "survDataCstExp"))
+    stop("survFitTT: object of class survDataCstExp expected")
+
+  # select data at target.time
+  dataTT <- selectDataTT(data, target.time)
+
+  # gather replicates according to time and conc
+  dataTT <- cbind(aggregate(cbind(Nsurv, Ninit) ~ time + conc, dataTT, sum), replicate = 1)
+
+  # choose the model by testing for mortality in the control
+  control <- filter(dataTT, conc == 0)
+  det.part <- if (any(control$Nsurv < control$Ninit)) {
+    "loglogisticbinom_3"
+  } else {
+    "loglogisticbinom_2"
+  }
+
+  # select the model text
+  if (det.part == "loglogisticbinom_2") {
+    model.text <- llbinom2.model.text
+  }
+  if (det.part == "loglogisticbinom_3") {
+    model.text <- llbinom3.model.text
+  }
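+  # (e.g., a control with Ninit = 20 and Nsurv = 18 at the target time has
+  # observed mortality, so the 3-parameter model is selected and its extra
+  # parameter d absorbs the control mortality)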
+
+  # parameters to monitor
+  parameters <- if (det.part == "loglogisticbinom_2") {
+    c("log10b", "log10e")
+  } else {
+    c("log10b", "d", "log10e")
+  }
+
+  # create priors parameters
+  jags.data <- survCreateJagsData(det.part, dataTT)
+
+  # Define model
+  model <- survLoadModel(model.program = model.text,
+                         data = jags.data, n.chains,
+                         Nadapt = 3000, quiet)
+
+  # Determine sampling parameters
+  sampling.parameters <- modelSamplingParameters(model,
+                                                 parameters, n.chains, quiet)
+
+  if (sampling.parameters$niter > 100000)
+    stop("The model needs too many iterations to provide reliable parameter estimates!")
+
+  # Sampling
+  prog.b <- ifelse(quiet, "none", "text")
+
+  mcmc <- coda.samples(model, parameters,
+                       n.iter = sampling.parameters$niter,
+                       thin = sampling.parameters$thin,
+                       progress.bar = prog.b)
+
+  # summarize the estimated parameters and their credible intervals
+  estim.par <- survPARAMS(mcmc, det.part)
+
+  # LCx calculation: estimated LCx and their 95% CIs
+  estim.LCx <- estimXCX(mcmc, lcx, "LC")
+
+  # check if estimated LC50 lies in the tested concentration range
+
+  warnings <- msgTableCreate()
+
+  LC50 <- log10(estim.par["e", "median"])
+  if (!(min(log10(data$conc)) < LC50 & LC50 < max(log10(data$conc)))){
+    ## store the warning in the warnings table
+    msg <- "The LC50 estimate (model parameter e) lies outside the range of tested concentrations and may be unreliable, as the prior distribution on this parameter is defined from this range!"
+    warnings <- msgTableAdd(warnings, "LC50outRange", msg)
+    ## print the message
+    warning(msg, call. = FALSE)
+  }
+
+  # output
+  OUT <- list(estim.LCx = estim.LCx,
+              estim.par = estim.par,
+              det.part = det.part,
+              mcmc = mcmc,
+              warnings = warnings,
+              model = model,
+              parameters = parameters,
+              n.chains = summary(mcmc)$nchain,
+              n.iter = list(start = summary(mcmc)$start,
+                            end = summary(mcmc)$end),
+              n.thin = summary(mcmc)$thin,
+              jags.data = jags.data,
+              transformed.data = data,
+              dataTT = dataTT)
+
+  class(OUT) <- "survFitTT"
+  return(OUT)
+}
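+
+# A usage sketch, mirroring the example removed from the roxygen block in
+# this change ('cadmium1' is a dataset provided by the package):
+#   dat <- survData(cadmium1)
+#   out <- survFitTT(dat, lcx = c(5, 10, 15, 20, 30, 50, 80), quiet = TRUE)
+#   out$estim.LCx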
+
+survCreateJagsData <- function(det.part, data) {
+  # Creates the parameters that define the priors of the log-logistic binomial model
+  # INPUTS
+  # det.part: model name
+  # data: object of class survData
+  # OUTPUT
+  # jags.data: list of data required by the jags.model function
+
+  # smallest non-zero (the control is dropped) and largest tested concentrations
+  concmin <- min(sort(unique(data$conc))[-1])
+  concmax <- max(data$conc)
+
+  # Create prior parameters for the log logistic model
+
+  # Params to define e
+  meanlog10e <- (log10(concmin) + log10(concmax)) / 2
+  sdlog10e <- (log10(concmax) - log10(concmin)) / 4
+  taulog10e <- 1 / sdlog10e^2
+
+  # Params to define b
+  log10bmin <- -2
+  log10bmax <- 2
+
+  # list of data used by JAGS
+  jags.data <- list(meanlog10e = meanlog10e,
+                    Ninit = data$Ninit,
+                    Nsurv = data$Nsurv,
+                    taulog10e = taulog10e,
+                    log10bmin = log10bmin,
+                    log10bmax = log10bmax,
+                    n = length(data$conc),
+                    xconc = data$conc)
+
+  # additional prior bounds for the 3-parameter model
+  if (det.part == "loglogisticbinom_3") {
+    jags.data <- c(jags.data,
+                   dmin = 0,
+                   dmax = 1)
+  }
+  return(jags.data)
+}
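+
+# A hedged numeric sketch of the prior on e (hypothetical design, not from
+# the package data): with tested concentrations c(0, 1, 10, 100), concmin = 1
+# and concmax = 100, so log10(e) ~ dnorm(mean = 1, sd = 0.5) and the tested
+# range [0, 2] on the log10 scale covers the mean +/- 2 sd.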
+
+survPARAMS <- function(mcmc, det.part) {
+  # Creates the table of posterior parameter estimates
+  # for the survival analysis
+  # INPUT:
+  # - mcmc: list of estimated parameters for the model, each item representing
+  #   a chain
+  # OUTPUT:
+  # - data frame with 3 columns (median, Q2.5, Q97.5) and 2-3 rows (the
+  #   estimated parameters)
+
+  # Retrieving parameters of the model
+  res.M <- summary(mcmc)
+
+  if (det.part ==  "loglogisticbinom_3") {
+    d <- res.M$quantiles["d", "50%"]
+    dinf <- res.M$quantiles["d", "2.5%"]
+    dsup <- res.M$quantiles["d", "97.5%"]
+  }
+  # for loglogisticbinom_2 and 3
+  b <- 10^res.M$quantiles["log10b", "50%"]
+  e <- 10^res.M$quantiles["log10e", "50%"]
+  binf <- 10^res.M$quantiles["log10b", "2.5%"]
+  einf <- 10^res.M$quantiles["log10e", "2.5%"]
+  bsup <- 10^res.M$quantiles["log10b", "97.5%"]
+  esup <- 10^res.M$quantiles["log10e", "97.5%"]
+
+  # Store the estimates and their credible interval bounds
+
+  if (det.part == "loglogisticbinom_3") {
+    # if mortality in the control
+    rownames <- c("b", "d", "e")
+    params <- c(b, d, e)
+    CIinf <- c(binf, dinf, einf)
+    CIsup <- c(bsup, dsup, esup)
+  } else {
+    # if no mortality in the control
+    rownames <- c("b", "e")
+    params <- c(b, e)
+    CIinf <- c(binf, einf)
+    CIsup <- c(bsup, esup)
+  }
+
+  res <- data.frame(median = params, Q2.5 = CIinf, Q97.5 = CIsup,
+                    row.names = rownames)
+
+  return(res)
+}
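+
+# The returned data.frame has rows b, e (2-parameter model) or b, d, e
+# (3-parameter model); survFitTT.survDataCstExp above reads the LC50 as
+# estim.par["e", "median"].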
+
+llbinom3.model.text <- "
+model # log-logistic binomial model with 3 parameters
+{
+for (i in 1:n)
+{
+p[i] <- d / (1 + (xconc[i]/e)^b)
+Nsurv[i] ~ dbin(p[i], Ninit[i])
+}
+
+# specification of priors (may be changed if needed)
+d ~ dunif(dmin, dmax)
+log10b ~ dunif(log10bmin, log10bmax)
+log10e ~ dnorm(meanlog10e, taulog10e)
+
+b <- pow(10, log10b)
+e <- pow(10, log10e)
+}
+"
+
+llbinom2.model.text <- "
+model # log-logistic binomial model with 2 parameters
+{
+for (i in 1:n)
+{
+p[i] <- 1 / (1 + (xconc[i]/e)^b)
+Nsurv[i] ~ dbin(p[i], Ninit[i])
+}
+
+# specification of priors (may be changed if needed)
+log10b ~ dunif(log10bmin, log10bmax)
+log10e ~ dnorm(meanlog10e, taulog10e)
+
+b <- pow(10, log10b)
+e <- pow(10, log10e)
+}
+"
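+
+# Note on the deterministic parts above: at xconc = e the survival
+# probability equals d/2 (3-parameter model) or 1/2 (2-parameter model),
+# which is why e is interpreted as the LC50.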