# Load libraries for data processing, modelling, and visualisation
library(tidyverse)
library(openxlsx2)
library(MASS)
library(corrplot)
library(DT)
library(climwin)
library(jtools)
library(DHARMa)
library(lavaan)
library(DiagrammeR)
library(lmtest)
library(glmmTMB)
library(scales)
library(performance)
# Load daily extreme weather event (EWE) data
# wb_to_df() (openxlsx2) reads the colony workbook; columns 4:19 hold the
# daily EWE metrics. NOTE(review): positional selection 4:19 assumes the
# workbook layout is stable — confirm if the sheet changes.
extreme_weather <- wb_to_df("Breeding_colony_ewes/Mewstone_ewes.xlsx") %>%
dplyr::mutate(across(4:19, ~ ifelse(is.na(.), 0, .)))
# Create binary version: 1 = event occurred, 0 = no event, NA = missing
# NOTE(review): NAs were already replaced with 0 in extreme_weather above, so
# the inner NA branch here is dead code — extreme_weather_binary cannot
# actually contain NA values.
extreme_weather_binary <- extreme_weather %>%
dplyr::mutate(across(4:19, ~ ifelse(!is.na(.) & . != 0, 1, ifelse(is.na(.), NA, 0))))
# Note:
# Missing values in extreme weather data are replaced with zero. This is critical because the slidingwin method ("method1" and "method2") internally calculates means when NA values are present, which is not suitable when assessing extreme values. We are specifically interested in whether an extreme event occurred, not in average conditions.
# A small number of missing values are present in the dataset, and replacing them with zero ensures consistency without introducing bias in this context.
# Load breeding data
breeding_data <- wb_to_df("Breeding_data/SHAL.xlsx", sheet = "Mewstone") %>%
dplyr::filter(!is.na(chicks)) # Remove seasons without productivity data
# Record sample size (number of breeding seasons with productivity data)
sample_size <- nrow(breeding_data)
# Assess normality of the response variable (chick count)
# If p > 0.05, the data does not significantly deviate from normality.
shapiro.test(breeding_data$chicks)
##
## Shapiro-Wilk normality test
##
## data: breeding_data$chicks
## W = 0.8807, p-value = 0.05947
# Histogram with density curve (prob = TRUE puts the y-axis on the density
# scale so the kernel density overlay below is comparable)
hist(breeding_data$chicks,
main = "Histogram of Chick Count",
xlab = "Chick Count",
col = "#a6d6fa",
border = "white",
prob = TRUE
)
# Overlay kernel density estimate
lines(density(breeding_data$chicks, na.rm = TRUE), col = "#0D92F4", lwd = 2)
# Q-Q plot of chick counts against theoretical normal quantiles
ggplot(breeding_data, aes(sample = chicks)) +
stat_qq() +
stat_qq_line(colour = "red") +
labs(title = "Q-Q Plot of Chick count",
x = "Theoretical Quantiles",
y = "Sample Quantiles") +
theme_classic()
# Calculate mean and variance to gauge overdispersion relative to a Poisson
# distribution (where mean and variance are equal)
mean(breeding_data$chicks)
## [1] 3783.929
var(breeding_data$chicks)
## [1] 554590.5
The Shapiro–Wilk test did not indicate a significant deviation from normality (W = 0.8807, p = 0.05947), although the p-value is marginal; we therefore fail to reject the null hypothesis that the data are normally distributed.
Furthermore, the variance (554590.5) greatly exceeds the mean (3783.929), indicating overdispersion and suggesting that a Poisson distribution—where the variance is expected to equal the mean—is not appropriate for this dataset. We therefore model the data with a Negative Binomial distribution.
# Run the sliding window analysis using actual (non-binary) values
# climwin::slidingwin fits the intercept-only negative binomial baseline plus
# each candidate climate window (all open/close combinations within `range`
# days before `refday`) for every EWE variable listed in xvar.
output1 <- slidingwin(xvar = list(warm_day = extreme_weather$warm_day,
warm_night = extreme_weather$warm_night,
heatwave = extreme_weather$heatwave,
cool_day = extreme_weather$cool_day,
cool_night = extreme_weather$cool_night,
coldwave = extreme_weather$coldwave,
wet_day = extreme_weather$wet_day,
heavy_rain_day = extreme_weather$heavy_rain_day,
very_heavy_rain_day = extreme_weather$very_heavy_rain_day,
ewdp = extreme_weather$ewdp,
vwdp = extreme_weather$vwdp,
extreme_wind_day = extreme_weather$extreme_wind_day,
extreme_wbt_day = extreme_weather$extreme_wbt_day,
extreme_wbgt_day = extreme_weather$extreme_wbgt_day,
extreme_at_day = extreme_weather$extreme_at_day,
extreme_wind_chill_day = extreme_weather$extreme_wind_chill_day),
cdate = extreme_weather$date, # Climate date
bdate = breeding_data$date, # Biological event date
baseline = glm.nb(chicks ~ 1,
link = "log",
data = breeding_data), # Baseline model
cohort = breeding_data$season, # Group by season
cinterval = "day", # Daily resolution
range = c(201, 0), # Check windows from 1 October to 20 April
refday = c(20, 04), # Reference date: 20 April
type = "absolute", # Absolute window type
stat = "sum", # Sum values within each window
func = "lin" # For linear relationship
)
# Run the sliding window analysis using binary event indicators
# Identical configuration to output1, but each xvar is the 0/1 version, so
# stat = "sum" counts the number of event-days within each candidate window
# rather than summing event magnitudes.
output2 <- slidingwin(xvar = list(warm_day_bi = extreme_weather_binary$warm_day,
warm_night_bi = extreme_weather_binary$warm_night,
heatwave_bi = extreme_weather_binary$heatwave,
cool_day_bi = extreme_weather_binary$cool_day,
cool_night_bi = extreme_weather_binary$cool_night,
coldwave_bi = extreme_weather_binary$coldwave,
wet_day_bi = extreme_weather_binary$wet_day,
heavy_rain_day_bi = extreme_weather_binary$heavy_rain_day,
very_heavy_rain_day_bi = extreme_weather_binary$very_heavy_rain_day,
ewdp_bi = extreme_weather_binary$ewdp,
vwdp_bi = extreme_weather_binary$vwdp,
extreme_wind_day_bi = extreme_weather_binary$extreme_wind_day,
extreme_wbt_day_bi = extreme_weather_binary$extreme_wbt_day,
extreme_wbgt_day_bi = extreme_weather_binary$extreme_wbgt_day,
extreme_at_day_bi = extreme_weather_binary$extreme_at_day,
extreme_wind_chill_day_bi = extreme_weather_binary$extreme_wind_chill_day),
cdate = extreme_weather_binary$date, # Climate date
bdate = breeding_data$date, # Biological event date
baseline = glm.nb(chicks ~ 1,
link = "log",
data = breeding_data), # Baseline model
cohort = breeding_data$season, # Group by season
cinterval = "day", # Daily resolution
range = c(201, 0), # Check windows from 1 October to 20 April
refday = c(20, 04), # Reference date: 20 April
type = "absolute", # Absolute window type
stat = "sum", # Sum values within each window
func = "lin" # For linear relationship
)
# Combine output from actual and binary sliding window analyses
# (output[[1]]..output[[16]] = actual values, output[[17]]..output[[32]] = binary)
output <- merge_results(output1, output2)
# View merged model combinations with calculated window duration
# WindowOpen/WindowClose are in days before the reference date, so the
# duration of a window is open - close + 1 days
datatable(output$combos %>%
dplyr::mutate(WindowDuration = WindowOpen - WindowClose + 1),
options = list(pageLength = 10, orderClasses = TRUE))
Before running the randomisation process, we need to identify the best-performing model for each extreme weather variable. This ensures that we are testing the most likely biologically relevant window against random expectation.
What we are doing here: for each weather variable (e.g., heavy rain, wet days), we extract the model with the lowest AIC value and a window duration longer than 14 days, in order to focus on ecologically meaningful timeframes.
These best models represent the strongest climate–breeding success relationships, and will be used for the randomisation test to assess whether the relationship is likely to have occurred by chance.
# Summary of the best model
# output[[13]] corresponds to extreme_wbt_day (13th xvar in output1's list)
summary(output[[13]]$BestModel)
##
## Call:
## glm.nb(formula = yvar ~ climate, data = modeldat, init.theta = 65.8204625,
## link = "log")
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 8.438384 0.051226 164.727 < 2e-16 ***
## climate -0.010581 0.001929 -5.487 4.1e-08 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for Negative Binomial(65.8205) family taken to be 1)
##
## Null deviance: 40.529 on 13 degrees of freedom
## Residual deviance: 14.057 on 12 degrees of freedom
## AIC: 217.51
##
## Number of Fisher Scoring iterations: 1
##
##
## Theta: 65.8
## Std. Err.: 25.3
##
## 2 x log-likelihood: -211.51
# Calculate the median window from models within 95% confidence interval of the best model
# (open/close values are in days before the reference date of 20 April)
medwin(output[[13]]$Dataset)
## $`Median Window Open`
## [1] 141
##
## $`Median Window Close`
## [1] 50
# Randomisation test to assess if the detected signal is likely by chance
# NOTE(review): repeats = 10 is low; the climwin documentation recommends on
# the order of 100+ randomisations for a stable p-value — consider increasing.
extreme_wbt_randwin <- randwin(repeats = 10,
window = "sliding",
xvar = list(extreme_wbt_day = extreme_weather$extreme_wbt_day),
cdate = extreme_weather$date,
bdate = breeding_data$date,
baseline = glm.nb(chicks ~ 1,
link = "log",
data = breeding_data),
cohort = breeding_data$season,
cinterval = "day",
range = c(201, 0),
refday = c(20, 04),
type = "absolute",
stat = "sum",
func = "lin"
)
# Calculate the p-value using Climwin Metric C
climwin::pvalue(dataset = output[[13]]$Dataset,
datasetrand = extreme_wbt_randwin[[1]],
metric = "C",
sample.size = sample_size
)
## [1] 0.5869934
# Plot sliding window and randomisation result
climwin::plotall(dataset = output[[13]]$Dataset,
datasetrand = extreme_wbt_randwin[[1]],
bestmodel = output[[13]]$BestModel,
bestmodeldata = output[[13]]$BestModelData,
arrow = TRUE
)
# Summary of the best model
# output[[28]] corresponds to extreme_wind_day_bi (12th xvar of the binary
# set, i.e. position 16 + 12 in the merged output)
summary(output[[28]]$BestModel)
##
## Call:
## glm.nb(formula = yvar ~ climate, data = modeldat, init.theta = 59.25411465,
## link = "log")
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 8.49521 0.06427 132.179 < 2e-16 ***
## climate -0.05737 0.01146 -5.008 5.5e-07 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for Negative Binomial(59.2541) family taken to be 1)
##
## Null deviance: 36.558 on 13 degrees of freedom
## Residual deviance: 14.071 on 12 degrees of freedom
## AIC: 218.97
##
## Number of Fisher Scoring iterations: 1
##
##
## Theta: 59.3
## Std. Err.: 22.7
##
## 2 x log-likelihood: -212.974
# Calculate the median window from models within 95% confidence interval of the best model
# (open/close values are in days before the reference date of 20 April)
medwin(output[[28]]$Dataset)
## $`Median Window Open`
## [1] 136
##
## $`Median Window Close`
## [1] 64
# Randomisation test to assess if the detected signal is likely by chance
# NOTE(review): repeats = 10 is low; the climwin documentation recommends on
# the order of 100+ randomisations for a stable p-value — consider increasing.
extreme_wind_randwin <- randwin(repeats = 10,
window = "sliding",
xvar = list(extreme_wind_day_bi = extreme_weather_binary$extreme_wind_day),
cdate = extreme_weather_binary$date,
bdate = breeding_data$date,
baseline = glm.nb(chicks ~ 1,
link = "log",
data = breeding_data),
cohort = breeding_data$season,
cinterval = "day",
range = c(201, 0),
refday = c(20, 04),
type = "absolute",
stat = "sum",
func = "lin"
)
# Calculate the p-value using Climwin Metric C
climwin::pvalue(dataset = output[[28]]$Dataset,
datasetrand = extreme_wind_randwin[[1]],
metric = "C",
sample.size = sample_size
)
## [1] 0.5155516
# Plot sliding window and randomisation result
climwin::plotall(dataset = output[[28]]$Dataset,
datasetrand = extreme_wind_randwin[[1]],
bestmodel = output[[28]]$BestModel,
bestmodeldata = output[[28]]$BestModelData,
arrow = TRUE
)
# Summary of the best model
# output[[7]] corresponds to wet_day (7th xvar in output1's list)
summary(output[[7]]$BestModel)
##
## Call:
## glm.nb(formula = yvar ~ climate, data = modeldat, init.theta = 107.9364653,
## link = "log")
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 8.837487 0.084567 104.502 < 2e-16 ***
## climate -0.007954 0.001040 -7.649 2.02e-14 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for Negative Binomial(107.9365) family taken to be 1)
##
## Null deviance: 65.630 on 13 degrees of freedom
## Residual deviance: 14.042 on 12 degrees of freedom
## AIC: 210.72
##
## Number of Fisher Scoring iterations: 1
##
##
## Theta: 107.9
## Std. Err.: 42.0
##
## 2 x log-likelihood: -204.716
# Calculate the median window from models within 95% confidence interval of the best model
# (open = 141 and close = 103 days before 20 April; these offsets are reused
# for the trend analysis further below)
medwin(output[[7]]$Dataset)
## $`Median Window Open`
## [1] 141
##
## $`Median Window Close`
## [1] 103
# Randomisation test to assess if the detected signal is likely by chance
# NOTE(review): repeats = 10 is low; the climwin documentation recommends on
# the order of 100+ randomisations for a stable p-value — consider increasing.
wet_day_randwin <- randwin(repeats = 10,
window = "sliding",
xvar = list(wet_day = extreme_weather$wet_day),
cdate = extreme_weather$date,
bdate = breeding_data$date,
baseline = glm.nb(chicks ~ 1,
link = "log",
data = breeding_data),
cohort = breeding_data$season,
cinterval = "day",
range = c(201, 0),
refday = c(20, 04),
type = "absolute",
stat = "sum",
func = "lin"
)
# Calculate the p-value using Climwin Metric C
climwin::pvalue(dataset = output[[7]]$Dataset,
datasetrand = wet_day_randwin[[1]],
metric = "C",
sample.size = sample_size
)
## [1] 0.01771165
# Plot sliding window and randomisation result (only wet_day survives the
# randomisation test, p < 0.05)
plotall(dataset = output[[7]]$Dataset,
datasetrand = wet_day_randwin[[1]],
bestmodel = output[[7]]$BestModel,
bestmodeldata = output[[7]]$BestModelData,
arrow = TRUE
)
# k-fold cross-validation improves the accuracy of the R^2 estimate, as R^2
# estimates from slidingwin can be biased at low sample size and/or effect size
wet_day_k_fold <- slidingwin(k = 10, # 10-fold cross-validation
xvar = list(wet_day = extreme_weather$wet_day),
cdate = extreme_weather$date,
bdate = breeding_data$date,
baseline = glm.nb(chicks ~ 1,
link = "log",
data = breeding_data),
cohort = breeding_data$season,
cinterval = "day",
range = c(201, 0),
refday = c(20, 04),
type = "absolute",
stat = "sum",
func = "lin"
)
# Summary of the best model from k-fold cross-validation
summary(wet_day_k_fold[[1]]$BestModel)
##
## Call:
## glm.nb(formula = yvar ~ climate, data = modeldat, init.theta = 32.44814064,
## link = "log")
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 8.357872 0.068781 121.514 <2e-16 ***
## climate -0.015407 0.006127 -2.515 0.0119 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for Negative Binomial(32.4481) family taken to be 1)
##
## Null deviance: 20.183 on 13 degrees of freedom
## Residual deviance: 14.092 on 12 degrees of freedom
## AIC: 227.35
##
## Number of Fisher Scoring iterations: 1
##
##
## Theta: 32.4
## Std. Err.: 12.3
##
## 2 x log-likelihood: -221.355
# Calculate Nagelkerke's R² for the best model from k-fold cross-validation
# (performance::r2)
r2(wet_day_k_fold[[1]]$BestModel)
## # R2 for Generalized Linear Regression
## Nagelkerke's R2: 0.462
Check for collinearity between the climate signals.
# Add climate signals to the original breeding data
# NOTE(review): this assumes BestModelData rows are in the same order as
# breeding_data rows — confirm before relying on positional assignment.
breeding_data <- breeding_data %>%
dplyr::mutate(wet_day_signal = output[[7]]$BestModelData$climate)
# Final model: negative binomial GLM of chick count on the wet-day signal
final_model <- glm.nb(chicks ~ 1 + wet_day_signal,
link = "log",
data = breeding_data)
# Summary of the final model
summary(final_model)
##
## Call:
## glm.nb(formula = chicks ~ 1 + wet_day_signal, data = breeding_data,
## link = "log", init.theta = 107.9364653)
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 8.837487 0.084567 104.502 < 2e-16 ***
## wet_day_signal -0.007954 0.001040 -7.649 2.02e-14 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for Negative Binomial(107.9365) family taken to be 1)
##
## Null deviance: 65.630 on 13 degrees of freedom
## Residual deviance: 14.042 on 12 degrees of freedom
## AIC: 210.72
##
## Number of Fisher Scoring iterations: 1
##
##
## Theta: 107.9
## Std. Err.: 42.0
##
## 2 x log-likelihood: -204.716
# Create scaled (quantile) residuals by simulating from the fitted model (DHARMa)
simulationOutput <- simulateResiduals(fittedModel = final_model, plot = TRUE)
# Test for over/underdispersion of the final model residuals
testDispersion(simulationOutput, plot = TRUE)
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.99646, p-value = 0.864
## alternative hypothesis: two.sided
# Plot the fitted effect of the wet-day signal on chick count (jtools)
effect_plot(final_model, pred = wet_day_signal, interval = TRUE, plot.points = TRUE,
main.title = "Relationship between Chick count and Wet day",
x.label = "Sum of wet day precipitation (mm)",
y.label = "Chick count",
colors = c("#7B8FA1"),
line.colors = c("#0D92F4"),
line.thickness = 1,
point.size = 2.5,
point.alpha = 0.5,
rug = TRUE) +
drop_gridlines() +
theme_classic()
# Save the final model data with climate signals (openxlsx2::write_xlsx)
write_xlsx(breeding_data, "Output_data/SHAL/SHAL_Mewstone_signal.xlsx")
# Save 95% confidence set for each climate signal
saveRDS(output[[7]]$Dataset, "Output_data/SHAL/Mewstone_wet_day_dataset.rds")
# Checking trends in cumulative intensity, frequency, and duration for each
# climate signal (using raw data), restricted to the identified median climate
# window (95% CI from the best model) counted back from the reference date.
# Fixed reference date: 20 April each year
ref_day <- "04-20"
# Create yearly open/close window lookup.
# The reference-date vector is computed once and reused (previously the same
# as.Date(paste0(...)) expression was evaluated twice).
# Offsets of 141 and 103 days before the reference date come from
# medwin(output[[7]]$Dataset) above.
seasons <- 1970:2024
ref_dates <- as.Date(paste0(seasons, "-", ref_day))
window_dates <- data.frame(
  season = seasons,
  window_open = ref_dates - 141,
  window_close = ref_dates - 103
)
# Add month-day format for easy matching across calendar years
window_dates$window_open_md <- format(window_dates$window_open, "%m-%d")
window_dates$window_close_md <- format(window_dates$window_close, "%m-%d")
# Summarise extreme events per season within the fixed median window.
# For each season: frequency = number of discrete wet-day events (runs of
# consecutive non-zero days), cumulative intensity = summed precipitation
# across events, total duration = summed run lengths.
summarised_wet_days <- extreme_weather %>%
filter(season >= 1970 & season <= 2024) %>%
# Join to add window dates per season
left_join(window_dates, by = "season") %>%
# Filter rows within the per-season open/close window.
# The OR is correct here only because the window spans the turn of the year
# (open ~ 30 Nov, close ~ 7 Jan); a window within one calendar year would
# need AND instead. Zero-padded "%m-%d" strings compare correctly
# lexicographically.
# NOTE(review): matching on month-day alone assumes `season` already
# identifies the correct split-year for each date — confirm how `season`
# is coded in the weather data.
filter(
format(date, "%m-%d") >= window_open_md | format(date, "%m-%d") <= window_close_md
) %>%
group_by(season) %>%
arrange(season, date) %>% # Chronological order so rle() sees contiguous runs
summarise(
# Extract daily index values for the current season
values = list(wet_day),
# Identify run lengths of contiguous non-zero values
rle_obj = list(rle(values[[1]] > 0)),
# Compute event-level summaries
extreme_event_summaries = list({
vals <- values[[1]]
runs <- rle_obj[[1]]
if (any(runs$values)) {
# Start index of each run, kept only for non-zero (TRUE) runs
starts <- cumsum(c(1, head(runs$lengths, -1)))[runs$values]
lengths <- runs$lengths[runs$values]
# Total precipitation within each event
event_sums <- mapply(function(start, len) sum(vals[start:(start + len - 1)], na.rm = TRUE),
starts, lengths)
tibble::tibble(
event_intensity = event_sums,
event_duration = lengths
)
} else {
# No events this season: empty summary keeps downstream sums at 0
tibble::tibble(event_intensity = numeric(0), event_duration = numeric(0))
}
}),
# Derived summaries
wet_day_frequency = nrow(extreme_event_summaries[[1]]),
wet_day_cum_intensity = sum(extreme_event_summaries[[1]]$event_intensity, na.rm = TRUE),
wet_day_tot_duration = sum(extreme_event_summaries[[1]]$event_duration, na.rm = TRUE),
.groups = "drop"
) %>%
dplyr::select(season, wet_day_frequency, wet_day_cum_intensity, wet_day_tot_duration)
############################## Write function to run Durbin-Watson test ##############################
# Run a Durbin-Watson test for temporal autocorrelation on each variable.
#
# For every variable name in `vars`, fits lm(var ~ season) on `data` and
# applies lmtest::dwtest() to the fitted model.
#
# Args:
#   data: data frame containing a `season` column and each variable in `vars`.
#   vars: character vector of column names to test.
#
# Returns: a data.frame with one row per variable holding the rounded DW
#   statistic, rounded p-value, and a verbal interpretation
#   ("positive"/"negative"/"none"). DW < 2 with p < 0.05 indicates positive
#   autocorrelation; DW > 2 with p < 0.05 indicates negative autocorrelation.
run_dw_test <- function(data, vars) {
  results <- lapply(vars, function(var) {
    formula_obj <- as.formula(paste(var, "~ season"))
    model <- lm(formula_obj, data = data)
    dw <- dwtest(model)
    # Scalar decision: plain if/else instead of nested ifelse(), which is
    # intended for vectors and obscures the logic here
    autocorrelation <- if (dw$p.value < 0.05) {
      if (dw$statistic < 2) "positive" else "negative"
    } else {
      "none"
    }
    data.frame(
      variable = var,
      DW_statistic = round(dw$statistic[[1]], 3),
      p_value = round(dw$p.value, 4),
      autocorrelation = autocorrelation,
      row.names = NULL
    )
  })
  # Stack the per-variable one-row frames into a single result table
  do.call(rbind, results)
}
# Check for autocorrelation in breeding success
run_dw_test(breeding_data, c("chicks"))
## variable DW_statistic p_value autocorrelation
## 1 chicks 1.225 0.0281 positive
# chicks show positive autocorrelation, motivating the AR(1) model below
# Check for autocorrelation in extreme weather variables
run_dw_test(summarised_wet_days, c("wet_day_frequency",
"wet_day_cum_intensity",
"wet_day_tot_duration"))
## variable DW_statistic p_value autocorrelation
## 1 wet_day_frequency 2.095 0.5839 none
## 2 wet_day_cum_intensity 2.087 0.5718 none
## 3 wet_day_tot_duration 2.724 0.9960 none
# Function to fit glmmTMB with autocorrelation.
#
# For each entry, fits `var ~ season + ar1(season + 0 | 1)` with the given
# family, prints the model summary, and plots the observed series; the fitted
# trend and 95% ribbon are overlaid only when the season effect is
# significant (p < 0.05).
#
# Args:
#   data: data frame with a numeric `season` column and each response variable.
#   variables_families: list of list(var = <column name>, family = <glmmTMB family>).
#
# NOTE(review): glmmTMB's ar1() covariance normally expects a factor time
# variable — confirm numeric `season` is handled as intended here.
fit_glmmTMB_with_ar1 <- function(data, variables_families) {
  for (entry in variables_families) {
    var <- entry$var
    fam <- entry$family
    model <- tryCatch({
      glmmTMB::glmmTMB(
        formula = as.formula(paste(var, "~ season + ar1(season + 0 | 1)")),
        data = data,
        family = fam
      )
    }, error = function(e) {
      message("Could not fit model for ", var, ": ", e$message)
      return(NULL)
    })
    if (is.null(model)) next
    # Summary
    cat("\n============================\n")
    cat("Model summary for:", var, "\n")
    print(summary(model))
    # Extract p-value safely: fall back to NA when no `season` row exists in
    # the conditional coefficients (previously a zero-length subset would
    # make the `if (!is.na(p) && ...)` condition error)
    coefs <- summary(model)$coefficients$cond
    season_row <- grep("^season$", rownames(coefs))
    p <- if (length(season_row) == 1) coefs[season_row, "Pr(>|z|)"] else NA_real_
    # Base plot: observed series
    p_plot <- ggplot(data, aes(x = season, y = .data[[var]])) +
      geom_point(shape = 19, size = 2.5, color = "#7B8FA1") +
      geom_line(linewidth = 0.5, color = "#7B8FA1", na.rm = TRUE)
    # Add model trend and annotation only if p < 0.05
    if (!is.na(p) && p < 0.05) {
      # Predictions over the full range of seasons
      newdata <- data.frame(season = seq(min(data$season), max(data$season)))
      preds <- predict(model, newdata = newdata, se.fit = TRUE, type = "response")
      # Annotation with sample size and p-value
      p_label <- if (p < 0.001) "p < 0.001" else paste0("p = ", round(p, 3))
      n <- nrow(data)
      ann_text <- paste0("n = ", n, "\n", p_label)
      # NOTE(review): 95% ribbon is fit +/- 1.96*SE on the response scale;
      # for log-link families a link-scale interval back-transformed to the
      # response would be preferable.
      p_plot <- p_plot +
        geom_ribbon(data = data.frame(
          season = newdata$season,
          ymin = preds$fit - 1.96 * preds$se.fit,
          ymax = preds$fit + 1.96 * preds$se.fit
        ), aes(x = season, ymin = ymin, ymax = ymax),
        fill = "#B6D0E2", alpha = 0.4, inherit.aes = FALSE) +
        geom_line(data = data.frame(
          season = newdata$season,
          fit = preds$fit
        ), aes(x = season, y = fit),
        color = "#0D92F4", linewidth = 0.75, inherit.aes = FALSE) +
        annotate("text",
                 x = Inf, y = -Inf,
                 hjust = 1.1, vjust = -0.2,
                 size = 3, color = "#1a1a1a",
                 label = ann_text)
    }
    # Final styling and print
    p_plot <- p_plot +
      scale_x_continuous(breaks = scales::pretty_breaks(n = 5),
                         expand = ggplot2::expansion(mult = c(0.02, 0.02))) +
      scale_y_continuous(breaks = scales::pretty_breaks(n = 5),
                         expand = ggplot2::expansion(mult = c(0.02, 0.02))) +
      labs(
        title = paste("Trend in", gsub("_", " ", var)),
        x = "Season", y = var
      ) +
      theme_classic(base_family = "Helvetica") +
      theme(
        plot.title = element_text(size = 10, margin = margin(b = 1), colour = "#0d0d0d"),
        axis.title = element_text(size = 9, colour = "#0d0d0d"),
        axis.text = element_text(size = 8),
        axis.ticks.length = unit(1, "pt"),
        axis.ticks = element_line(linewidth = 0.5),
        axis.line = element_blank(),
        # linewidth replaces the `size` argument deprecated in ggplot2 3.4,
        # matching element_line(linewidth = ...) above
        panel.border = element_rect(color = "#1a1a1a", fill = NA, linewidth = 0.25)
      )
    print(p_plot)
  }
}
# Fit glmmTMB and plot
# Breeding success trend over seasons with AR(1) structure (motivated by the
# positive autocorrelation found by the Durbin-Watson test above)
fit_glmmTMB_with_ar1(breeding_data,
variables_families = list(list(var = "chicks",
family = nbinom2())
)
)
##
## ============================
## Model summary for: chicks
## Family: nbinom2 ( log )
## Formula: chicks ~ season + ar1(season + 0 | 1)
## Data: data
##
## AIC BIC logLik -2*log(L) df.resid
## 230.7 232.7 -112.4 224.7 11
##
##
## Dispersion parameter for nbinom2 family (): 25.5
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -17.806039 19.363255 -0.920 0.358
## season 0.012922 0.009608 1.345 0.179
# Function to fit glmmTMB without autocorrelation.
#
# For each entry, fits `var ~ season` with the given family, prints the model
# summary, and plots the observed series; the fitted trend and 95% ribbon are
# overlaid only when the season effect is significant (p < 0.05). The
# annotation sits in the top-right corner (vs bottom-right in the AR(1)
# variant above).
#
# Args:
#   data: data frame with a numeric `season` column and each response variable.
#   variables_families: list of list(var = <column name>, family = <glmmTMB family>).
fit_glmmTMB_no_ar1 <- function(data, variables_families) {
  for (entry in variables_families) {
    var <- entry$var
    fam <- entry$family
    model <- tryCatch({
      glmmTMB::glmmTMB(as.formula(paste(var, "~ season")), data = data, family = fam)
    }, error = function(e) {
      message("Could not fit model for ", var, ": ", e$message)
      return(NULL)
    })
    if (is.null(model)) next
    # Summary
    cat("\n============================\n")
    cat("Model summary for:", var, "\n")
    print(summary(model))
    # Extract p-value safely: fall back to NA when no `season` row exists in
    # the conditional coefficients (previously a zero-length subset would
    # make the `if (!is.na(p) && ...)` condition error)
    coefs <- summary(model)$coefficients$cond
    season_row <- grep("^season$", rownames(coefs))
    p <- if (length(season_row) == 1) coefs[season_row, "Pr(>|z|)"] else NA_real_
    # Base plot: observed series
    p_plot <- ggplot(data, aes(x = season, y = .data[[var]])) +
      geom_point(shape = 19, size = 2.5, color = "#7B8FA1") +
      geom_line(linewidth = 0.5, color = "#7B8FA1", na.rm = TRUE)
    # Add model trend and annotation only if p < 0.05
    if (!is.na(p) && p < 0.05) {
      # Predictions over the full range of seasons
      newdata <- data.frame(season = seq(min(data$season), max(data$season)))
      preds <- predict(model, newdata = newdata, se.fit = TRUE, type = "response")
      # Annotation with sample size and p-value
      p_label <- if (p < 0.001) "p < 0.001" else paste0("p = ", round(p, 3))
      n <- nrow(data)
      ann_text <- paste0("n = ", n, "\n", p_label)
      # NOTE(review): 95% ribbon is fit +/- 1.96*SE on the response scale;
      # for log-link families a link-scale interval back-transformed to the
      # response would be preferable.
      p_plot <- p_plot +
        geom_ribbon(data = data.frame(
          season = newdata$season,
          ymin = preds$fit - 1.96 * preds$se.fit,
          ymax = preds$fit + 1.96 * preds$se.fit
        ), aes(x = season, ymin = ymin, ymax = ymax),
        fill = "#B6D0E2", alpha = 0.4, inherit.aes = FALSE) +
        geom_line(data = data.frame(
          season = newdata$season,
          fit = preds$fit
        ), aes(x = season, y = fit),
        color = "#0D92F4", linewidth = 0.75, inherit.aes = FALSE) +
        annotate("text",
                 x = Inf, y = Inf,
                 hjust = 1.1, vjust = 1.2,
                 size = 3, color = "#1a1a1a",
                 label = ann_text)
    }
    # Final styling and print
    p_plot <- p_plot +
      scale_x_continuous(breaks = scales::pretty_breaks(n = 5),
                         expand = ggplot2::expansion(mult = c(0.02, 0.02))) +
      scale_y_continuous(breaks = scales::pretty_breaks(n = 5),
                         expand = ggplot2::expansion(mult = c(0.02, 0.02))) +
      labs(
        title = paste("Trend in", gsub("_", " ", var)),
        x = "Season", y = var
      ) +
      theme_classic(base_family = "Helvetica") +
      theme(
        plot.title = element_text(size = 10, margin = margin(b = 1), colour = "#0d0d0d"),
        axis.title = element_text(size = 9, colour = "#0d0d0d"),
        axis.text = element_text(size = 8),
        axis.ticks.length = unit(1, "pt"),
        axis.ticks = element_line(linewidth = 0.5),
        axis.line = element_blank(),
        # linewidth replaces the `size` argument deprecated in ggplot2 3.4,
        # matching element_line(linewidth = ...) above
        panel.border = element_rect(color = "#1a1a1a", fill = NA, linewidth = 0.25)
      )
    print(p_plot)
  }
}
# Fit glmmTMB and plot
# Trends in wet-day event summaries: counts (frequency, duration) use
# Poisson; positive continuous cumulative intensity uses Gamma with log link
fit_glmmTMB_no_ar1(summarised_wet_days,
variables_families = list(
list(var = "wet_day_frequency", family = poisson()),
list(var = "wet_day_cum_intensity", family = Gamma(link = "log")),
list(var = "wet_day_tot_duration", family = poisson())
)
)
##
## ============================
## Model summary for: wet_day_frequency
## Family: poisson ( log )
## Formula: wet_day_frequency ~ season
## Data: data
##
## AIC BIC logLik -2*log(L) df.resid
## 229.0 233.0 -112.5 225.0 53
##
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 3.4598710 6.3536877 0.544 0.586
## season -0.0007491 0.0031818 -0.235 0.814
##
## ============================
## Model summary for: wet_day_cum_intensity
## Family: Gamma ( log )
## Formula: wet_day_cum_intensity ~ season
## Data: data
##
## AIC BIC logLik -2*log(L) df.resid
## 561.6 567.6 -277.8 555.6 52
##
##
## Dispersion estimate for Gamma family (sigma^2): 0.133
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 33.49919 6.33046 5.292 1.21e-07 ***
## season -0.01443 0.00317 -4.552 5.32e-06 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## ============================
## Model summary for: wet_day_tot_duration
## Family: poisson ( log )
## Formula: wet_day_tot_duration ~ season
## Data: data
##
## AIC BIC logLik -2*log(L) df.resid
## 318.9 322.9 -157.5 314.9 53
##
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 15.752495 3.967675 3.970 7.18e-05 ***
## season -0.006433 0.001988 -3.235 0.00121 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Standardise each numeric column to have mean zero and standard deviation of one
# NOTE(review): mean()/sd() have no na.rm here — any NA in a numeric column
# would make the entire standardised column NA; confirm columns are complete.
breeding_data_standardised <- breeding_data %>%
mutate(across(where(is.numeric), ~ ( . - mean(.) ) / sd(.)))
# Extract individual regressions from SEM
# m1: temporal trend in the climate signal; m2: direct climate + season
# effects on breeding success
m1 <- lm(wet_day_signal ~ season, data = breeding_data_standardised)
m2 <- lm(chicks ~ wet_day_signal + season, data = breeding_data_standardised)
# Summarise individual regressions
summary(m1)
##
## Call:
## lm(formula = wet_day_signal ~ season, data = breeding_data_standardised)
##
## Residuals:
## Min 1Q Median 3Q Max
## -1.4487 -0.5665 -0.1667 0.4336 2.3290
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 5.462e-15 2.455e-01 0.000 1.0000
## season -4.702e-01 2.548e-01 -1.845 0.0898 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.9186 on 12 degrees of freedom
## Multiple R-squared: 0.2211, Adjusted R-squared: 0.1562
## F-statistic: 3.406 on 1 and 12 DF, p-value: 0.08978
# Summarise m2 (chicks ~ wet_day_signal + season)
summary(m2)
##
## Call:
## lm(formula = chicks ~ wet_day_signal + season, data = breeding_data_standardised)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.92315 0.00608 0.02779 0.09199 0.88415
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 1.785e-16 1.285e-01 0.000 1.0
## wet_day_signal -9.146e-01 1.511e-01 -6.053 8.28e-05 ***
## season -3.918e-02 1.511e-01 -0.259 0.8
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.4809 on 11 degrees of freedom
## Multiple R-squared: 0.8044, Adjusted R-squared: 0.7688
## F-statistic: 22.61 on 2 and 11 DF, p-value: 0.0001268
# Check model assumptions for individual regressions
# (2x2 panel of base lm diagnostic plots per model)
# NOTE(review): par(mfrow) is changed without restoring the previous setting
par(mfrow = c(2, 2))
plot(m1)
plot(m2)
# Define the SEM model (lavaan syntax)
sem_model <- '
# Regress breeding success on climate signals and time
chicks ~ wet_day_signal + season
# Regress climate signals on time
wet_day_signal ~ season
# Define intercept for breeding success
chicks ~ 1
'
# Fit the SEM model with non-parametric bootstrapping with 1000 iterations
set.seed(666) # Seed for reproducible bootstrap draws
fit <- lavaan::sem(sem_model, data = breeding_data_standardised, se = "bootstrap", bootstrap = 1000)
# Summarise the model fit
summary(fit, fit.measures = TRUE, rsquare=TRUE)
## lavaan 0.6-20 ended normally after 1 iteration
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 7
##
## Number of observations 14
##
## Model Test User Model:
##
## Test statistic 0.000
## Degrees of freedom 0
##
## Model Test Baseline Model:
##
## Test statistic 26.338
## Degrees of freedom 3
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 1.000
## Tucker-Lewis Index (TLI) 1.000
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -25.524
## Loglikelihood unrestricted model (H1) -25.524
##
## Akaike (AIC) 65.048
## Bayesian (BIC) 69.521
## Sample-size adjusted Bayesian (SABIC) 48.210
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.000
## 90 Percent confidence interval - lower 0.000
## 90 Percent confidence interval - upper 0.000
## P-value H_0: RMSEA <= 0.050 NA
## P-value H_0: RMSEA >= 0.080 NA
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.000
##
## Parameter Estimates:
##
## Standard errors Bootstrap
## Number of requested bootstrap draws 1000
## Number of successful bootstrap draws 999
##
## Regressions:
## Estimate Std.Err z-value P(>|z|)
## chicks ~
## wet_day_signal -0.915 0.191 -4.779 0.000
## season -0.039 0.172 -0.228 0.819
## wet_day_signal ~
## season -0.470 0.314 -1.497 0.134
##
## Intercepts:
## Estimate Std.Err z-value P(>|z|)
## .chicks 0.000 0.132 0.000 1.000
## .wet_day_signal 0.000 0.250 0.000 1.000
##
## Variances:
## Estimate Std.Err z-value P(>|z|)
## .chicks 0.182 0.074 2.466 0.014
## .wet_day_signal 0.723 0.333 2.173 0.030
##
## R-Square:
## Estimate
## chicks 0.804
## wet_day_signal 0.221
# Extract parameter estimates and standard errors using parameterEstimates()
params <- lavaan::parameterEstimates(fit)
relationships <- params[params$op == "~", ] # Keep regression paths only
# Create a data frame with the desired results
results <- data.frame(
SEM_int = params$est[params$lhs == "chicks" & params$op == "~1"],
SEM_beta_wet_day = params$est[params$lhs == "chicks" & params$rhs == "wet_day_signal"],
SEM_beta_season = params$est[params$lhs == "chicks" & params$rhs == "season"],
SEM_SE_wet_day = params$se[params$lhs == "chicks" & params$rhs == "wet_day_signal"],
SEM_SE_season = params$se[params$lhs == "chicks" & params$rhs == "season"],
Yr_beta_wet_day = params$est[params$lhs == "wet_day_signal" & params$rhs == "season"],
Yr_SE_wet_day = params$se[params$lhs == "wet_day_signal" & params$rhs == "season"]
) %>%
mutate(
# Indirect pathway magnitude: season -> wet_day_signal -> chicks
EWE_pathway = abs(SEM_beta_wet_day) * abs(Yr_beta_wet_day),
# NOTE(review): the total effect is built from absolute values, so
# opposing direct/indirect effects cannot cancel — confirm this is the
# intended definition rather than the conventional signed sum.
Total_effect_season = abs(SEM_beta_season) + abs(EWE_pathway),
change_due_to_EWE_pathway = (abs(EWE_pathway) / abs(Total_effect_season)) * 100
)
# Print results
print(results)
## SEM_int SEM_beta_wet_day SEM_beta_season SEM_SE_wet_day SEM_SE_season
## 1 1.391532e-16 -0.9146135 -0.03918479 0.1913704 0.171534
## Yr_beta_wet_day Yr_SE_wet_day EWE_pathway Total_effect_season
## 1 -0.4701753 0.313988 0.4300287 0.4692135
## change_due_to_EWE_pathway
## 1 91.64884
# Add arrow colors, line styles, and significance stars based on significance and direction
# NOTE(review): the as.numeric() wrappers are redundant (est and pvalue are
# already numeric in parameterEstimates output) but harmless.
relationships <- relationships %>%
mutate(
color = case_when(
as.numeric(est) > 0 & as.numeric(pvalue) < 0.05 ~ "#0079FF", # Positive and significant = blue
as.numeric(est) < 0 & as.numeric(pvalue) < 0.05 ~ "#FF2929", # Negative and significant = red
TRUE ~ "#B7B7B7" # Non-significant = grey
),
style = ifelse(as.numeric(pvalue) < 0.05, "solid", "dashed"), # Significant = solid, Non-significant = dashed
unit = case_when( # Units shown in each edge label of the path diagram
lhs == "chicks" & rhs == "season" ~ "no./season",
lhs == "chicks" & rhs == "wet_day_signal" ~ "no./mm",
lhs == "wet_day_signal" ~ "mm/season",
TRUE ~ "unit" # Default unit for other relationships
),
stars = case_when( # Add stars based on significance level
pvalue < 0.001 ~ "***",
pvalue < 0.01 ~ "**",
pvalue < 0.05 ~ "*",
TRUE ~ ""
)
)
# Extract R² values for annotating the path diagram nodes
rsq <- lavaan::inspect(fit, "rsquare")
# NOTE(review): node_labels is never used below — graph_code rebuilds the
# node labels inline; consider removing or reusing this vector.
node_labels <- c(
paste0("Chick count\nr² = ", round(rsq["chicks"], 2)),
paste0("Wet day\nr² = ", round(rsq["wet_day_signal"], 2)),
"Season"
)
# Construct DOT edge statements for the plot, adding significance stars in
# the label. Iterates over the columns with mapply instead of apply(df, 1, ...),
# which coerces the whole data frame to a character matrix (a known pitfall:
# numeric columns get format()-converted and types are lost).
# NOTE(review): the edge label embeds a literal newline ("\n") while the node
# labels below use the DOT escape "\\n" — consider unifying.
edges <- mapply(
  function(rhs, lhs, color, style, est, unit, stars) {
    paste0(
      "\"", rhs, "\" -> \"", lhs, "\" ", # Quote names to handle special characters
      "[color=\"", color, "\", style=", style,
      ", label=\"", round(est, 2), stars, "\n(", unit, ")\", fontsize=10]"
    )
  },
  relationships$rhs, relationships$lhs, relationships$color,
  relationships$style, relationships$est, relationships$unit,
  relationships$stars,
  USE.NAMES = FALSE
)
# Generate DiagrammeR graph with rectangular nodes and custom labels
# ("\\n" yields the DOT escape \n so Graphviz renders a line break)
graph_code <- paste0(
"digraph SEM {",
"\nnode [shape=rectangle, style=filled, fillcolor=white];",
"\n", paste(edges, collapse = "\n"),
"\n", paste0(
"\"chicks\" [label=\"Chick count\\nr² = ", round(rsq["chicks"], 2), "\", fontsize=12];",
"\"wet_day_signal\" [label=\"Wet day\\nr² = ", round(rsq["wet_day_signal"], 2), "\", fontsize=12];",
"\"season\" [label=\"Season\", fontsize=12];"
),
"\n}"
)
# Render the DiagrammeR graph
DiagrammeR::grViz(graph_code)