# Load libraries for data processing, modelling, and visualisation
library(tidyverse)
library(openxlsx2)
library(MASS)
library(corrplot)
library(DT)
library(climwin)
library(jtools)
library(DHARMa)
library(lavaan)
library(DiagrammeR)
library(lmtest)
library(glmmTMB)
library(scales)
# Load the daily extreme weather event (EWE) records for West Moncoeur.
# Missing values in the EWE columns (4:20) are set to zero (see note below).
extreme_weather <- wb_to_df("Breeding_colony_ewes/West_Moncoeur_ewes.xlsx") %>%
  dplyr::mutate(dplyr::across(4:20, ~ replace(., is.na(.), 0)))
# Binary recoding of the EWE columns: 1 = event occurred, 0 = no event,
# NA = missing (`. != 0` yields NA for NA inputs, so missingness is preserved)
extreme_weather_binary <- extreme_weather %>%
  dplyr::mutate(dplyr::across(4:20, ~ as.numeric(. != 0)))
# Note:
# Missing values in extreme weather data are replaced with zero. This is critical because the slidingwin method ("method1" and "method2") internally calculates means when NA values are present, which is not suitable when assessing extreme values. We are specifically interested in whether an extreme event occurred, not in average conditions.
# A small number of missing values are present in the dataset, and replacing them with zero ensures consistency without introducing bias in this context.
# Load breeding data for the West Moncoeur colony
# NOTE(review): the original comment said "Short-tailed Shearwater", but the
# file name "AUFS" and the "pup" response suggest Australian fur seal —
# confirm the intended species label.
breeding_data <- wb_to_df("Breeding_data/AUFS.xlsx", sheet = "West_Moncoeur") %>%
dplyr::filter(!is.na(pup)) # Remove seasons without productivity data
# Record sample size (passed to climwin::pvalue() further below)
sample_size <- nrow(breeding_data)
# Assess normality of response variable
# If p > 0.05, the data does not significantly deviate from normality.
# NOTE(review): with only ~12 seasons (see df = 11 in the model output below)
# the test has little power — interpret alongside the plots that follow.
shapiro.test(breeding_data$pup)
##
## Shapiro-Wilk normality test
##
## data: breeding_data$pup
## W = 0.90162, p-value = 0.1664
# Histogram with density curve (prob = TRUE puts the y-axis on a density
# scale so the kernel density overlay below is directly comparable)
hist(breeding_data$pup,
main = "Histogram of Pup Count",
xlab = "Pup Count",
col = "#a6d6fa",
border = "white",
prob = TRUE
)
# Overlay kernel density estimate
lines(density(breeding_data$pup, na.rm = TRUE), col = "#0D92F4", lwd = 2)
# Q-Q plot of the raw pup counts against theoretical normal quantiles
ggplot(breeding_data, aes(sample = pup)) +
stat_qq() +
stat_qq_line(colour = "red") +
labs(title = "Q-Q Plot of Pup count",
x = "Theoretical Quantiles",
y = "Sample Quantiles") +
theme_classic()
# Calculate mean and variance (compared to assess overdispersion relative
# to a Poisson assumption, where mean and variance are expected to be equal)
mean(breeding_data$pup)
## [1] 233
var(breeding_data$pup)
## [1] 3677.818
The Shapiro–Wilk test did not indicate a significant deviation from normality (W = 0.90162, p = 0.1664); therefore, we failed to reject the null hypothesis that the data are normally distributed. However, visual assessments, including the histogram and Q–Q plot, suggest that the data deviate from a normal distribution, and the small sample size limits the power of the formal test.
Furthermore, the mean (233) and variance (3677.818) differ substantially, indicating overdispersion and suggesting that a Poisson distribution—where the mean and variance are expected to be equal—is not appropriate for this dataset. Therefore, we will use a Negative Binomial distribution to model the data.
# Run the sliding window analysis using actual (non-binary) values.
# climwin fits the baseline model plus each candidate window for each
# variable and ranks the windows against the baseline.
output1 <- slidingwin(xvar = list(warm_day = extreme_weather$warm_day,
warm_night = extreme_weather$warm_night,
heatwave = extreme_weather$heatwave,
cool_day = extreme_weather$cool_day,
cool_night = extreme_weather$cool_night,
coldwave = extreme_weather$coldwave,
wet_day = extreme_weather$wet_day,
heavy_rain_day = extreme_weather$heavy_rain_day,
very_heavy_rain_day = extreme_weather$very_heavy_rain_day,
ewdp = extreme_weather$ewdp,
vwdp = extreme_weather$vwdp,
extreme_wind_day = extreme_weather$extreme_wind_day,
extreme_wave_energy_day = extreme_weather$extreme_wave_energy_day,
extreme_wbt_day = extreme_weather$extreme_wbt_day,
extreme_wbgt_day = extreme_weather$extreme_wbgt_day,
extreme_at_day = extreme_weather$extreme_at_day,
extreme_wind_chill_day = extreme_weather$extreme_wind_chill_day),
cdate = extreme_weather$date, # Daily climate record dates
bdate = breeding_data$date, # Biological event dates (pup count dates)
baseline = glm.nb(pup ~ 1,
link = "log",
data = breeding_data), # Baseline model: intercept-only Negative Binomial
cohort = breeding_data$season, # Group records by breeding season
refday = c(30, 01), # Reference day as c(day, month): 30 January, the last monitoring day across seasons
cinterval = "day", # Daily resolution
range = c(96, 0), # Test all windows opening 0-96 days before the reference day
type = "absolute", # Windows fixed relative to refday rather than to individual dates
stat = "sum", # Sum of EWE values over the window
func = "lin" # Test linear relationships
)
# Run the sliding window analysis using binary event indicators
# (1 = event day, 0 = no event), so stat = "sum" counts event days per window.
output2 <- slidingwin(xvar = list(warm_day_bi = extreme_weather_binary$warm_day,
warm_night_bi = extreme_weather_binary$warm_night,
heatwave_bi = extreme_weather_binary$heatwave,
cool_day_bi = extreme_weather_binary$cool_day,
cool_night_bi = extreme_weather_binary$cool_night,
coldwave_bi = extreme_weather_binary$coldwave, # fixed label typo: was "codlwave_bi"
wet_day_bi = extreme_weather_binary$wet_day,
heavy_rain_day_bi = extreme_weather_binary$heavy_rain_day,
very_heavy_rain_day_bi = extreme_weather_binary$very_heavy_rain_day,
ewdp_bi = extreme_weather_binary$ewdp,
vwdp_bi = extreme_weather_binary$vwdp,
extreme_wind_day_bi = extreme_weather_binary$extreme_wind_day,
extreme_wave_energy_day_bi = extreme_weather_binary$extreme_wave_energy_day,
extreme_wbt_day_bi = extreme_weather_binary$extreme_wbt_day,
extreme_wbgt_day_bi = extreme_weather_binary$extreme_wbgt_day,
extreme_at_day_bi = extreme_weather_binary$extreme_at_day,
extreme_wind_chill_day_bi = extreme_weather_binary$extreme_wind_chill_day),
cdate = extreme_weather_binary$date, # Daily climate record dates
bdate = breeding_data$date, # Biological event dates (pup count dates)
baseline = glm.nb(pup ~ 1,
link = "log",
data = breeding_data), # Baseline model: intercept-only Negative Binomial
cohort = breeding_data$season, # Group records by breeding season
refday = c(30, 01), # Reference day as c(day, month): 30 January, the last monitoring day across seasons
cinterval = "day", # Daily resolution
range = c(96, 0), # Test all windows opening 0-96 days before the reference day
type = "absolute", # Windows fixed relative to refday rather than to individual dates
stat = "sum", # Sum of binary indicators = number of event days in the window
func = "lin" # Test linear relationships
)
# Circular mean of angles given in degrees.
#
# Converts the angles to unit vectors, averages the sine and cosine
# components (ignoring NAs), and returns the direction of the mean vector
# mapped into [0, 360). Suitable for directional data such as wave or wind
# direction, where values wrap around at 360 degrees.
circ_mean <- function(x) {
  # Work in radians for the trigonometric functions
  theta <- x * (pi / 180)
  # Direction of the mean unit vector, converted back to degrees
  angle_deg <- atan2(mean(sin(theta), na.rm = TRUE),
                     mean(cos(theta), na.rm = TRUE)) * (180 / pi)
  # atan2() yields values in (-180, 180]; shift negatives into [0, 360)
  if (angle_deg >= 0) angle_deg else angle_deg + 360
}
# Run the sliding window analysis with daily mean wave direction data.
# NOTE(review): stat = "circ_mean" names the custom function defined above;
# presumably climwin resolves it from the calling environment — confirm
# against the climwin documentation for custom aggregate statistics.
output3 <- slidingwin(xvar = list(wave_direction = extreme_weather$wave_direction),
cdate = extreme_weather$date, # Daily climate record dates
bdate = breeding_data$date, # Biological event dates (pup count dates)
baseline = glm.nb(pup ~ 1,
link = "log",
data = breeding_data), # Baseline model: intercept-only Negative Binomial
cohort = breeding_data$season, # Group records by breeding season
refday = c(30, 01), # Reference day as c(day, month): 30 January
cinterval = "day", # Daily resolution
range = c(96, 0), # Test all windows opening 0-96 days before the reference day
type = "absolute", # Windows fixed relative to refday rather than to individual dates
stat = "circ_mean", # Circular mean of daily wave direction values over the window
func = c("lin", "quad") # Test linear and quadratic relationships
)
# Examine the tested combinations, adding window duration in days
output3$combos %>%
  dplyr::mutate(WindowDuration = WindowOpen - WindowClose + 1) %>%
  datatable(options = list(pageLength = 10, orderClasses = TRUE))
# Combine output from actual and binary sliding window analyses
output <- merge_results(output1, output2)
# View merged model combinations, adding window duration in days
output$combos %>%
  dplyr::mutate(WindowDuration = WindowOpen - WindowClose + 1) %>%
  datatable(options = list(pageLength = 10, orderClasses = TRUE))
Before running the randomisation process, we need to identify the best-performing model for each extreme weather variable. This ensures that we are testing the most likely biologically relevant window against random expectation.
What we are doing here: for each weather variable (e.g., heavy rain, wet days), we extract the model with the lowest AIC value whose window duration exceeds 14 days, so that we focus on ecologically meaningful timeframes.
These best models represent the strongest climate–breeding success relationships, and will be used for the randomisation test to assess whether the relationship is likely to have occurred by chance.
# Summary of the best model
# NOTE(review): output[[4]] is selected by position in the merged results —
# presumably the cool_day model, given the randomisation test below; confirm
# the index against output$combos.
summary(output[[4]]$BestModel)
##
## Call:
## glm.nb(formula = yvar ~ climate, data = modeldat, init.theta = 57.32421438,
## link = "log")
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 5.20082 0.06593 78.879 < 2e-16 ***
## climate -0.12805 0.02714 -4.719 2.37e-06 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for Negative Binomial(57.3242) family taken to be 1)
##
## Null deviance: 35.255 on 11 degrees of freedom
## Residual deviance: 12.471 on 10 degrees of freedom
## AIC: 124.81
##
## Number of Fisher Scoring iterations: 1
##
##
## Theta: 57.3
## Std. Err.: 30.4
##
## 2 x log-likelihood: -118.809
# Calculate the median window from models within 95% confidence interval of the best model
medwin(output[[4]]$Dataset)
## $`Median Window Open`
## [1] 62
##
## $`Median Window Close`
## [1] 21
# Randomisation test to assess if the detected signal is likely by chance.
# NOTE(review): repeats = 10 is small for a stable randomised null
# distribution; climwin examples typically use 100+ repeats — consider
# increasing before drawing conclusions from the p-value.
cool_day_randwin <- randwin(repeats = 10,
window = "sliding",
xvar = list(cool_day = extreme_weather$cool_day),
cdate = extreme_weather$date,
bdate = breeding_data$date,
baseline = glm.nb(pup ~ 1,
link = "log",
data = breeding_data),
cohort = breeding_data$season,
cinterval = "day",
refday = c(30, 01),
range = c(96, 0),
type = "absolute",
stat = c("sum"),
func = c("lin")
)
# Calculate the p-value using climwin metric C, comparing the observed
# result against the randomised datasets given the sample size
climwin::pvalue(dataset = output[[4]]$Dataset,
datasetrand = cool_day_randwin[[1]],
metric = "C",
sample.size = sample_size
)
## [1] 0.1630257
# Plot sliding window and randomisation result
climwin::plotall(dataset = output[[4]]$Dataset,
datasetrand = cool_day_randwin[[1]],
bestmodel = output[[4]]$BestModel,
bestmodeldata = output[[4]]$BestModelData,
arrow = TRUE
)
# Summary of the best model
# NOTE(review): output[[33]] is selected by position in the merged results —
# presumably the binary extreme air-temperature model, given the
# randomisation test below; confirm the index against output$combos.
summary(output[[33]]$BestModel)
##
## Call:
## glm.nb(formula = yvar ~ climate, data = modeldat, init.theta = 41.91809717,
## link = "log")
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 5.96354 0.14002 42.590 < 2e-16 ***
## climate -0.06180 0.01547 -3.994 6.51e-05 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for Negative Binomial(41.9181) family taken to be 1)
##
## Null deviance: 27.308 on 11 degrees of freedom
## Residual deviance: 12.075 on 10 degrees of freedom
## AIC: 127.5
##
## Number of Fisher Scoring iterations: 1
##
##
## Theta: 41.9
## Std. Err.: 20.3
##
## 2 x log-likelihood: -121.498
# Calculate the median window from models within 95% confidence interval of the best model
medwin(output[[33]]$Dataset)
## $`Median Window Open`
## [1] 74
##
## $`Median Window Close`
## [1] 22
# Randomisation test to assess if the detected signal is likely by chance.
# NOTE(review): as above, repeats = 10 is small; climwin examples typically
# use 100+ repeats for a stable p-value — consider increasing.
extreme_at_randwin <- randwin(repeats = 10,
window = "sliding",
xvar = list(extreme_at_day_bi = extreme_weather_binary$extreme_at_day),
cdate = extreme_weather_binary$date,
bdate = breeding_data$date,
baseline = glm.nb(pup ~ 1,
link = "log",
data = breeding_data),
cohort = breeding_data$season,
cinterval = "day",
refday = c(30, 01),
range = c(96, 0),
type = "absolute",
stat = c("sum"),
func = c("lin")
)
# Calculate the p-value using climwin metric C, comparing the observed
# result against the randomised datasets given the sample size
climwin::pvalue(dataset = output[[33]]$Dataset,
datasetrand = extreme_at_randwin[[1]],
metric = "C",
sample.size = sample_size
)
## [1] 0.3236161
# Plot sliding window and randomisation result
climwin::plotall(dataset = output[[33]]$Dataset,
datasetrand = extreme_at_randwin[[1]],
bestmodel = output[[33]]$BestModel,
bestmodeldata = output[[33]]$BestModelData,
arrow = TRUE
)
############################## Write function to run Durbin-Watson test ##############################
# Run Durbin-Watson test for autocorrelation
# Durbin-Watson test for temporal autocorrelation.
#
# For each variable name in `vars`, regresses the variable on `season` with
# lm() and applies dwtest() to the fit. Returns a data.frame with one row
# per variable: the rounded DW statistic, the p-value, and a verdict —
# "positive" (DW < 2) or "negative" (DW > 2) when p < 0.05, else "none".
run_dw_test <- function(data, vars) {
  one_test <- function(v) {
    # Build and fit the per-variable model: <v> ~ season
    fit <- lm(as.formula(paste(v, "~ season")), data = data)
    dw <- dwtest(fit)
    # Direction of autocorrelation is only reported when significant
    verdict <- if (dw$p.value < 0.05) {
      if (dw$statistic < 2) "positive" else "negative"
    } else {
      "none"
    }
    data.frame(variable = v,
               DW_statistic = round(dw$statistic[[1]], 3),
               p_value = round(dw$p.value, 4),
               autocorrelation = verdict,
               row.names = NULL)
  }
  do.call(rbind, lapply(vars, one_test))
}
# Check for autocorrelation in breeding success
# (DW close to 2 with a large p-value indicates no serial autocorrelation)
run_dw_test(breeding_data, c("pup"))
## variable DW_statistic p_value autocorrelation
## 1 pup 2.919 0.9204 none
# Fit glmmTMB models without an AR(1) autocorrelation term and plot trends.
#
# `variables_families` is a list of lists, each with:
#   var    — a column name in `data` (character)
#   family — a glmmTMB family object (e.g. nbinom2())
# For each entry the function fits `var ~ season`, prints the model summary,
# and plots the observed values by season; the fitted trend line, an
# approximate 95% ribbon, and an n/p annotation are overlaid only when the
# season coefficient has p < 0.05. Models that fail to fit are skipped with
# a message. Called for its side effects (printed summaries and plots).
fit_glmmTMB_no_ar1 <- function(data, variables_families) {
  for (entry in variables_families) {
    var <- entry$var
    fam <- entry$family
    # Fit the model; on error, report and move on to the next variable
    model <- tryCatch({
      glmmTMB::glmmTMB(as.formula(paste(var, "~ season")), data = data, family = fam)
    }, error = function(e) {
      message("Could not fit model for ", var, ": ", e$message)
      return(NULL)
    })
    if (is.null(model)) next
    # Summary
    cat("\n============================\n")
    cat("Model summary for:", var, "\n")
    print(summary(model))
    # Extract the season p-value from the conditional-model coefficients
    coefs <- summary(model)$coefficients$cond
    season_row <- grep("^season$", rownames(coefs))
    p <- coefs[season_row, "Pr(>|z|)"]
    # Base plot: observed values connected across seasons
    p_plot <- ggplot(data, aes(x = season, y = .data[[var]])) +
      geom_point(shape = 19, size = 2.5, color = "#7B8FA1") +
      geom_line(linewidth = 0.5, color = "#7B8FA1", na.rm = TRUE)
    # Add model trend and annotation only if p < 0.05
    if (!is.na(p) && p < 0.05) {
      # Predictions on the response scale across the observed season range
      newdata <- data.frame(season = seq(min(data$season), max(data$season)))
      preds <- predict(model, newdata = newdata, se.fit = TRUE, type = "response")
      # Annotation with sample size and (rounded) p-value
      p_label <- if (p < 0.001) "p < 0.001" else paste0("p = ", round(p, 3))
      n <- nrow(data)
      ann_text <- paste0("n = ", n, "\n", p_label)
      p_plot <- p_plot +
        # NOTE: ribbon is a normal approximation (fit +/- 1.96 * SE) computed
        # on the response scale
        geom_ribbon(data = data.frame(
          season = newdata$season,
          ymin = preds$fit - 1.96 * preds$se.fit,
          ymax = preds$fit + 1.96 * preds$se.fit
        ), aes(x = season, ymin = ymin, ymax = ymax),
        fill = "#B6D0E2", alpha = 0.4, inherit.aes = FALSE) +
        geom_line(data = data.frame(
          season = newdata$season,
          fit = preds$fit
        ), aes(x = season, y = fit),
        color = "#0D92F4", linewidth = 0.75, inherit.aes = FALSE) +
        annotate("text",
                 x = Inf, y = Inf,
                 hjust = 1.1, vjust = 1.2,
                 size = 3, color = "#1a1a1a",
                 label = ann_text)
    }
    # Final styling and print
    p_plot <- p_plot +
      scale_x_continuous(breaks = scales::pretty_breaks(n = 5),
                         expand = ggplot2::expansion(mult = c(0.02, 0.02))) +
      scale_y_continuous(breaks = scales::pretty_breaks(n = 5),
                         expand = ggplot2::expansion(mult = c(0.02, 0.02))) +
      labs(
        title = paste("Trend in", gsub("_", " ", var)),
        x = "Season", y = var
      ) +
      theme_classic(base_family = "Helvetica") +
      theme(
        plot.title = element_text(size = 10, margin = margin(b = 1), colour = "#0d0d0d"),
        axis.title = element_text(size = 9, colour = "#0d0d0d"),
        axis.text = element_text(size = 8),
        axis.ticks.length = unit(1, "pt"),
        axis.ticks = element_line(linewidth = 0.5),
        axis.line = element_blank(),
        # `linewidth` (not the deprecated `size`) for element_rect, consistent
        # with the element_line()/geom calls above (ggplot2 >= 3.4)
        panel.border = element_rect(color = "#1a1a1a", fill = NA, linewidth = 0.25)
      )
    print(p_plot)
  }
}
# Fit glmmTMB and plot the seasonal trend in pup counts
# (nbinom2 = negative binomial with log link, matching the climwin baseline)
fit_glmmTMB_no_ar1(breeding_data,
variables_families = list(list(var = "pup",
family = nbinom2())
)
)
##
## ============================
## Model summary for: pup
## Family: nbinom2 ( log )
## Formula: pup ~ season
## Data: data
##
## AIC BIC logLik -2*log(L) df.resid
## 135.0 136.4 -64.5 129.0 9
##
##
## Dispersion parameter for nbinom2 family (): 20.8
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 29.319204 14.442242 2.030 0.0423 *
## season -0.011937 0.007221 -1.653 0.0983 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1