# Load libraries for data processing, modelling, and visualisation
library(tidyverse)
library(openxlsx2)
library(MASS)
library(corrplot)
library(DT)
library(climwin)
library(jtools)
library(DHARMa)
library(lavaan)
library(DiagrammeR)
library(lmtest)
library(glmmTMB)
library(scales)
# Daily extreme weather event (EWE) records for the Little Green colony.
# Columns 4-8 hold the event measures; missing observations are recoded to
# zero (i.e. treated as "no event") — see the note below for the rationale.
extreme_weather <- wb_to_df("Breeding_colony_ewes/Little_Green_ewes.xlsx") %>%
  dplyr::mutate(dplyr::across(4:8, function(x) ifelse(is.na(x), 0, x)))
# Binary indicator version of the EWE data: 1 = event occurred, 0 = no event,
# NA = missing observation.
# NOTE(review): `extreme_weather` already had its NAs replaced with 0 above,
# so the NA branch here can never be reached — confirm whether the binary
# data were meant to be derived from the raw (pre-replacement) records.
extreme_weather_binary <- extreme_weather %>%
  dplyr::mutate(dplyr::across(4:8, function(x) {
    ifelse(!is.na(x) & x != 0, 1, ifelse(is.na(x), NA, 0))
  }))
# Note:
# Missing values in extreme weather data are replaced with zero. This is critical because the slidingwin method ("method1" and "method2") internally calculates means when NA values are present, which is not suitable when assessing extreme values. We are specifically interested in whether an extreme event occurred, not in average conditions.
# A small number of missing values are present in the dataset, and replacing them with zero ensures consistency without introducing bias in this context.
# Short-tailed Shearwater breeding records for Little Green Island; seasons
# without a breeding-success value (`bs`) are dropped before analysis.
breeding_data <- dplyr::filter(
  wb_to_df("Breeding_data/STSH.xlsx", sheet = "Little_Green_Island"),
  !is.na(bs)
)
# Number of seasons retained; reused later as climwin's pvalue sample size
sample_size <- nrow(breeding_data)
# Assess normality of response variable
# If p > 0.05, the data does not significantly deviate from normality.
shapiro.test(breeding_data$bs)
##
## Shapiro-Wilk normality test
##
## data: breeding_data$bs
## W = 0.93562, p-value = 0.1607
# Histogram with density curve
# prob = TRUE scales the y-axis to densities so the kernel density
# estimate overlaid below shares the same scale as the bars.
hist(breeding_data$bs,
main = "Histogram of Breeding Success",
xlab = "Breeding Success",
col = "#a6d6fa",
border = "white",
prob = TRUE
)
# Overlay kernel density estimate
lines(density(breeding_data$bs, na.rm = TRUE), col = "#0D92F4", lwd = 2)
# Q-Q plot: sample quantiles of breeding success against theoretical normal
# quantiles; points tracking the red reference line indicate normality.
ggplot(breeding_data, mapping = aes(sample = bs)) +
  stat_qq() +
  stat_qq_line(colour = "red") +
  theme_classic() +
  labs(
    title = "Q-Q Plot of Breeding Success",
    x = "Theoretical Quantiles",
    y = "Sample Quantiles"
  )
The Shapiro–Wilk test did not indicate a significant deviation from normality (W = 0.93562, p = 0.1607); we therefore fail to reject the null hypothesis that the data are normally distributed, supporting the use of a Gaussian (identity-link) baseline model in the analyses that follow.
# Run the sliding window analysis using actual (non-binary) values.
# climwin fits one linear model per candidate window for each weather
# variable and compares it against the intercept-only baseline (lower
# AICc relative to the baseline = stronger climate signal).
output1 <- slidingwin(xvar = list(wet_day = extreme_weather$wet_day,
heavy_rain_day = extreme_weather$heavy_rain_day,
very_heavy_rain_day = extreme_weather$very_heavy_rain_day,
ewdp = extreme_weather$ewdp,
vwdp = extreme_weather$vwdp),
cdate = extreme_weather$date, # Climate date
bdate = breeding_data$date, # Biological event date
baseline = lm(bs ~ 1,
data = breeding_data), # Intercept-only baseline model
cohort = breeding_data$season, # Group by season
cinterval = "day", # Daily resolution
range = c(95, 0), # Windows opening up to 95 days before the reference day (~1 December to 6 March)
refday = c(06, 03), # Reference date: 6 March
type = "absolute", # Absolute window type (same calendar window for all cohorts)
stat = "sum", # Sum values within each window
func = "lin" # For linear relationship
)
# Run the sliding window analysis using binary event indicators
# (1 = event occurred, 0 = no event). With stat = "sum" each window's
# climate value is the COUNT of event days in that window.
output2 <- slidingwin(xvar = list(wet_day_bi = extreme_weather_binary$wet_day,
heavy_rain_day_bi = extreme_weather_binary$heavy_rain_day,
very_heavy_rain_day_bi = extreme_weather_binary$very_heavy_rain_day,
ewdp_bi = extreme_weather_binary$ewdp,
vwdp_bi = extreme_weather_binary$vwdp),
cdate = extreme_weather_binary$date, # Climate date
bdate = breeding_data$date, # Biological event date
baseline = lm(bs ~ 1,
data = breeding_data), # Intercept-only baseline model
cohort = breeding_data$season, # Group by season
cinterval = "day", # Daily resolution
range = c(95, 0), # Windows opening up to 95 days before the reference day (~1 December to 6 March)
refday = c(06, 03), # Reference date: 6 March
type = "absolute", # Absolute window type (same calendar window for all cohorts)
stat = "sum", # Sum of 0/1 values = number of event days in the window
func = "lin" # For linear relationship
)
# Merge the continuous (output1) and binary (output2) sliding window results
# into a single climwin output object.
output <- merge_results(output1, output2)
# Interactive table of every fitted model combination, with the window
# duration (in days) derived from its open and close offsets.
combos_with_duration <- dplyr::mutate(
  output$combos,
  WindowDuration = WindowOpen - WindowClose + 1
)
datatable(combos_with_duration,
          options = list(pageLength = 10, orderClasses = TRUE))
Before running the randomisation process, we need to identify the best-performing model for each extreme weather variable. This ensures that we are testing the most likely biologically relevant window against random expectation.
What we are doing here: for each weather variable (e.g. heavy rain, wet days), we extract the model with (1) the lowest AIC value, and (2) a window duration longer than 14 days, to focus on ecologically meaningful timeframes.
These best models represent the strongest climate–breeding success relationships, and will be used for the randomisation test to assess whether the relationship is likely to have occurred by chance.
# Summary of the best model
# output[[9]] is evidently the binary extreme wet day period (ewdp_bi) model
# — the randomisation test below pairs output[[9]] with ewdp_bi; confirm
# against the merged combos table if the variable order changes.
summary(output[[9]]$BestModel)
##
## Call:
## lm(formula = yvar ~ climate, data = modeldat)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.38572 -0.05593 0.05217 0.11217 0.19213
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.70236 0.03795 18.508 4.72e-14 ***
## climate -0.16516 0.07960 -2.075 0.0511 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.1565 on 20 degrees of freedom
## Multiple R-squared: 0.1771, Adjusted R-squared: 0.136
## F-statistic: 4.305 on 1 and 20 DF, p-value: 0.05112
# Calculate the median window from models within 95% confidence interval of the best model
# (window open/close are days before the 6 March reference day)
medwin(output[[9]]$Dataset)
## $`Median Window Open`
## [1] 68
##
## $`Median Window Close`
## [1] 30
# Randomisation test to assess if the detected signal is likely by chance:
# the climate-date/biological-date pairing is shuffled and the sliding window
# search repeated, building a null distribution of best-model deltaAICc.
# NOTE(review): repeats = 10 is a small number of randomisations — confirm
# whether a larger value (e.g. 100+) is feasible for the final analysis.
ewdp_randwin <- randwin(repeats = 10,
window = "sliding",
xvar = list(ewdp_bi = extreme_weather_binary$ewdp),
cdate = extreme_weather_binary$date,
bdate = breeding_data$date,
baseline = lm(bs ~ 1,
data = breeding_data),
cohort = breeding_data$season,
cinterval = "day",
range = c(95, 0),
refday = c(06, 03),
type = "absolute",
stat = c("sum"),
func = c("lin")
)
# Calculate the p-value using Climwin Metric C
# (compares the observed deltaAICc against the randomised null distribution)
climwin::pvalue(dataset = output[[9]]$Dataset,
datasetrand = ewdp_randwin[[1]],
metric = "C",
sample.size = sample_size
)
## [1] 0.7574966
# Plot sliding window and randomisation result
# (p = 0.76 above: the apparent window is consistent with chance)
climwin::plotall(dataset = output[[9]]$Dataset,
datasetrand = ewdp_randwin[[1]],
bestmodel = output[[9]]$BestModel,
bestmodeldata = output[[9]]$BestModelData,
arrow = TRUE
)
# Summarise the best model
# output[[3]] is evidently the continuous very heavy rain day model — the
# randomisation test below pairs output[[3]] with very_heavy_rain_day;
# confirm against the merged combos table if the variable order changes.
summary(output[[3]]$BestModel)
##
## Call:
## lm(formula = yvar ~ climate, data = modeldat)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.30499 -0.10093 0.02217 0.11838 0.27287
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.621621 0.037905 16.399 4.6e-13 ***
## climate 0.003168 0.001395 2.271 0.0343 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.1538 on 20 degrees of freedom
## Multiple R-squared: 0.205, Adjusted R-squared: 0.1653
## F-statistic: 5.158 on 1 and 20 DF, p-value: 0.03433
# Calculate the median window from models within 95% confidence interval of the best model
# (window open/close are days before the 6 March reference day)
medwin(output[[3]]$Dataset)
## $`Median Window Open`
## [1] 69
##
## $`Median Window Close`
## [1] 29
# Randomisation test to assess if the detected signal is likely by chance
# (same procedure as for ewdp_bi above, using the continuous very heavy
# rain day values).
# NOTE(review): repeats = 10 is a small number of randomisations — confirm
# whether a larger value (e.g. 100+) is feasible for the final analysis.
very_heavy_rain_randwin <- randwin(repeats = 10,
window = "sliding",
xvar = list(very_heavy_rain_day = extreme_weather$very_heavy_rain_day),
cdate = extreme_weather$date,
bdate = breeding_data$date,
baseline = lm(bs ~ 1,
data = breeding_data),
cohort = breeding_data$season,
cinterval = "day",
range = c(95, 0),
refday = c(06, 03),
type = "absolute",
stat = "sum",
func = "lin"
)
# Calculate the p-value using Climwin Metric C
# (compares the observed deltaAICc against the randomised null distribution)
climwin::pvalue(dataset = output[[3]]$Dataset,
datasetrand = very_heavy_rain_randwin[[1]],
metric = "C",
sample.size = sample_size
)
## [1] 0.7357621
# Plot sliding window and randomisation result
# (p = 0.74 above: the apparent window is consistent with chance)
climwin::plotall(dataset = output[[3]]$Dataset,
datasetrand = very_heavy_rain_randwin[[1]],
bestmodel = output[[3]]$BestModel,
bestmodeldata = output[[3]]$BestModelData,
arrow = TRUE
)
############################## Write function to run Durbin-Watson test ##############################
# Run a Durbin-Watson test for temporal autocorrelation on each variable.
#
# For every name in `vars`, a simple linear trend model (variable ~ season)
# is fitted to `data` and its residuals are tested with lmtest::dwtest().
#
# data : data frame containing `season` and each column named in `vars`
# vars : character vector of column names to test
#
# Returns a data frame with one row per variable: the DW statistic, its
# p-value, and a plain-language interpretation of the autocorrelation
# ("positive", "negative", or "none").
run_dw_test <- function(data, vars) {
  results <- lapply(vars, function(var) {
    formula_obj <- as.formula(paste(var, "~ season"))
    model <- lm(formula_obj, data = data)
    dw <- dwtest(model)
    # DW < 2 suggests positive autocorrelation, DW > 2 negative; only label
    # a direction when the test is significant at alpha = 0.05. Plain
    # if/else is used here because the condition is scalar (ifelse() is for
    # vectors and would strip attributes).
    direction <- if (dw$p.value < 0.05) {
      if (dw$statistic < 2) "positive" else "negative"
    } else {
      "none"
    }
    data.frame(
      variable = var,
      DW_statistic = round(dw$statistic[[1]], 3),
      p_value = round(dw$p.value, 4),
      autocorrelation = direction,
      row.names = NULL
    )
  })
  do.call(rbind, results)
}
# Check for autocorrelation in breeding success across seasons;
# "none" means no significant serial correlation was detected, so an
# AR(1) term is not required in the trend model below.
run_dw_test(breeding_data, c("bs"))
## variable DW_statistic p_value autocorrelation
## 1 bs 1.725 0.1829 none
# Function to fit glmmTMB without autocorrelation.
# Fits a glmmTMB trend model (variable ~ season) for each requested variable
# and plots the raw time series, overlaying the fitted trend line and a 95%
# confidence ribbon only when the season effect is significant (p < 0.05).
#
# data               : data frame with a numeric `season` column and each
#                      response variable as a column
# variables_families : list of lists, each with elements `var` (column name
#                      as a string) and `family` (family object for glmmTMB)
#
# Side effects: prints a model summary and a ggplot for every variable.
# Models that fail to fit are skipped with a message.
fit_glmmTMB_no_ar1 <- function(data, variables_families) {
  for (entry in variables_families) {
    var <- entry$var
    fam <- entry$family
    model <- tryCatch({
      glmmTMB::glmmTMB(as.formula(paste(var, "~ season")), data = data, family = fam)
    }, error = function(e) {
      message("Could not fit model for ", var, ": ", e$message)
      return(NULL)
    })
    if (is.null(model)) next
    # Summary
    cat("\n============================\n")
    cat("Model summary for:", var, "\n")
    print(summary(model))
    # Extract the season coefficient's p-value. Guard against the row being
    # absent: grep() would return integer(0), giving a zero-length `p` that
    # makes `!is.na(p) && p < 0.05` error on R >= 4.3.
    coefs <- summary(model)$coefficients$cond
    season_row <- grep("^season$", rownames(coefs))
    p <- if (length(season_row) == 1) coefs[season_row, "Pr(>|z|)"] else NA_real_
    # Base plot: observed values joined season to season
    p_plot <- ggplot(data, aes(x = season, y = .data[[var]])) +
      geom_point(shape = 19, size = 2.5, color = "#7B8FA1") +
      geom_line(linewidth = 0.5, color = "#7B8FA1", na.rm = TRUE)
    # Add model trend and annotation only if p < 0.05
    if (!is.na(p) && p < 0.05) {
      # Predictions over the observed range of seasons
      newdata <- data.frame(season = seq(min(data$season), max(data$season)))
      preds <- predict(model, newdata = newdata, se.fit = TRUE, type = "response")
      # Annotation: sample size and (rounded) p-value
      p_label <- if (p < 0.001) "p < 0.001" else paste0("p = ", round(p, 3))
      n <- nrow(data)
      ann_text <- paste0("n = ", n, "\n", p_label)
      p_plot <- p_plot +
        geom_ribbon(data = data.frame(
          season = newdata$season,
          ymin = preds$fit - 1.96 * preds$se.fit, # approximate 95% CI
          ymax = preds$fit + 1.96 * preds$se.fit
        ), aes(x = season, ymin = ymin, ymax = ymax),
        fill = "#B6D0E2", alpha = 0.4, inherit.aes = FALSE) +
        geom_line(data = data.frame(
          season = newdata$season,
          fit = preds$fit
        ), aes(x = season, y = fit),
        color = "#0D92F4", linewidth = 0.75, inherit.aes = FALSE) +
        annotate("text",
                 x = Inf, y = Inf,
                 hjust = 1.1, vjust = 1.2,
                 size = 3, color = "#1a1a1a",
                 label = ann_text)
    }
    # Final styling and print
    p_plot <- p_plot +
      scale_x_continuous(breaks = scales::pretty_breaks(n = 5),
                         expand = ggplot2::expansion(mult = c(0.02, 0.02))) +
      scale_y_continuous(breaks = scales::pretty_breaks(n = 5),
                         expand = ggplot2::expansion(mult = c(0.02, 0.02))) +
      labs(
        title = paste("Trend in", gsub("_", " ", var)),
        x = "Season", y = var
      ) +
      theme_classic(base_family = "Helvetica") +
      theme(
        plot.title = element_text(size = 10, margin = margin(b = 1), colour = "#0d0d0d"),
        axis.title = element_text(size = 9, colour = "#0d0d0d"),
        axis.text = element_text(size = 8),
        axis.ticks.length = unit(1, "pt"),
        axis.ticks = element_line(linewidth = 0.5),
        axis.line = element_blank(),
        # `linewidth` replaces the deprecated `size` argument (ggplot2 >= 3.4)
        panel.border = element_rect(color = "#1a1a1a", fill = NA, linewidth = 0.25)
      )
    print(p_plot)
  }
}
# Fit glmmTMB and plot.
# Test for a long-term linear trend in breeding success across seasons,
# using a Gaussian family (supported by the normality checks above).
fit_glmmTMB_no_ar1(breeding_data,
variables_families = list(list(var = "bs",
family = gaussian())
)
)
##
## ============================
## Model summary for: bs
## Family: gaussian ( identity )
## Formula: bs ~ season
## Data: data
##
## AIC BIC logLik -2*log(L) df.resid
## -11.0 -7.7 8.5 -17.0 19
##
##
## Dispersion estimate for gaussian family (sigma^2): 0.027
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.3353023 8.9240898 0.038 0.970
## season 0.0001637 0.0044345 0.037 0.971