replace datasets in rsf code
walterASEL committed Mar 14, 2024
1 parent 16fc663 commit 4872656
Showing 6 changed files with 141,678 additions and 31 deletions.
Binary file modified .DS_Store
Binary file not shown.
14 changes: 8 additions & 6 deletions LinearDistScript.Rmd
@@ -109,7 +109,7 @@ plot(st_geometry(deer.albers),add=T, col="red")
11\. Then we need to expand the bounding polygon so that all locations are included. We can then make the bounding polygon (AlbersSP) an object of class owin in order to proceed with functions in the spatstat package.

```{r warning=FALSE,message=FALSE}
-buffSP <- st_buffer(AlbersSP,1500)
+buffSP <- st_buffer(AlbersSP,2000)
plot(st_geometry(buffSP))
plot(st_geometry(deer.albers),add=T,col="red")
```
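The `is.owin(bdy.owin)` check in the next hunk refers to the owin conversion step. A minimal sketch of that conversion, assuming spatstat.geom >= 2.0, which provides `as.owin()` methods for sf polygon geometries:

```{r eval=FALSE}
#Sketch of the owin conversion (assumes spatstat.geom >= 2.0)
library(spatstat.geom)
bdy.owin <- as.owin(st_geometry(buffSP))
is.owin(bdy.owin)#TRUE means spatstat functions can use it as a window
```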
@@ -128,12 +128,14 @@ is.owin(bdy.owin)
12\. Now clip the raster, roads, and rivers using a buffered bounding box created as in step 5 (here a tighter buffer, cropSP).

```{r warning=FALSE,message=FALSE}
-bbclip <- crop(veg, buffSP)
-cliproads <- st_intersection(roads, buffSP, byid=TRUE)
-cliprivers <- st_intersection(rivers, buffSP, byid=TRUE)
+cropSP <- st_buffer(AlbersSP,1000)
+bbclip <- crop(veg, cropSP)
+cliproads <- st_intersection(roads, cropSP, byid=TRUE)
+cliprivers <- st_intersection(rivers, cropSP, byid=TRUE)
-plot(bbclip)
-plot(st_geometry(buffSP),add=T)
+plot(st_geometry(buffSP))
+plot(bbclip,add=T)
+plot(st_geometry(cropSP),add=T)
plot(st_geometry(deer.albers),add=T,col="red")
plot(st_geometry(cliproads),add=T)
plot(st_geometry(cliprivers), col="blue",add=T)
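After clipping, it is worth confirming that the clipped extent still covers every location. A quick sanity check, sketched under the assumption that veg and bbclip are raster-package objects (with terra, use `terra::ext()` instead):

```{r eval=FALSE}
#Sanity check (sketch): deer locations should fall inside the clipped extent
raster::extent(bbclip)
sf::st_bbox(deer.albers)
```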
30 changes: 14 additions & 16 deletions LogisticRSF.Rmd
@@ -25,24 +25,22 @@ library(adehabitatHR)
3\. Load files for each season in the mule deer dataset. We may need to identify some numerical data as factors prior to implementing the resource selection analysis.

```{r}
data_1 <- read.csv("data/MD_winter12.csv",header=T)
############################### MODELING ###############################
data_1 <- read.csv("data/MD_winter12sub.csv",header=T)
###################### MODELING #########################
data_1$crop <- data_1$nlcd
data_1$crop=as.factor(data_1$crop)
#dataset$SEASON=factor(dataset$SEASON)
data_1[,3:4]=scale(data_1[,3:4],scale=TRUE)#standardize data to mean of zero
```
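A quick structural check after the recoding can catch problems before modeling. A sketch, assuming the same column positions used by the `scale()` call above:

```{r eval=FALSE}
str(data_1$crop)#should now be a factor
table(data_1$crop)#records per habitat category
round(colMeans(data_1[,3:4]), 6)#approximately 0 after standardizing
apply(data_1[,3:4], 2, sd)#approximately 1 after standardizing
```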

4\. We may need to use code that changes the reference category of our data. For our analysis we are going to define the reference category of \emph{used} habitat as crop = 1. Crop category 1 is sunflower, which was the crop of interest but was not selected for based on selection ratios in Exercise 8.4.

```{r eval=FALSE}
fit1 = glmer(use ~ relevel(crop,"1")+(1|animal_id), data=data_1, family=binomial(link=
fit1 = glmer(use ~ relevel(crop,"5")+(1|animal_id), data=data_1, family=binomial(link=
"logit"),nAGQ = 0)#Sunflower and cover model
fit2 = glmer(use ~ d_cover+(1|animal_id), data=data_1, family=binomial(link="logit"),nAGQ
fit2 = glmer(use ~ elevation+(1|animal_id), data=data_1, family=binomial(link="logit"),nAGQ
= 0)#Distance to cover only model
fit3 = glmer(use ~ d_roads+(1|animal_id), data=data_1, family=binomial(link="logit"),nAGQ
fit3 = glmer(use ~ crop+slope+(1|animal_id), data=data_1, family=binomial(link="logit"),nAGQ
= 0)#Distance to roads only model
fit4 = glmer(use ~ d_cover+d_roads+(1|animal_id), data=data_1, family=binomial(link="logit"),
fit4 = glmer(use ~ crop+elevation+(1|animal_id), data=data_1, family=binomial(link="logit"),
nAGQ = 0)#Distance to cover and roads model
fit5 = glmer(use ~ 1|animal_id, data=data_1, family=binomial(link="logit"),nAGQ = 0)#Intercept model
```
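The `print(myaicc, LL = FALSE)` call in the next hunk suggests the candidate models are ranked by AICc. A hedged sketch of how that table might be built with package AICcmodavg, assuming the fit1 through fit5 objects from the chunk above:

```{r eval=FALSE}
#Sketch of the AICc ranking referenced below (assumes package AICcmodavg)
library(AICcmodavg)
mynames <- paste0("fit", 1:5)
myaicc <- aictab(cand.set=list(fit1,fit2,fit3,fit4,fit5), modnames=mynames)
print(myaicc, LL = FALSE)
```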
@@ -66,25 +64,25 @@ print(myaicc, LL = FALSE)
6\. Our top model (fit1) has all the support in this case, indicating that during winter 2012 the mule deer were selecting for each habitat over sunflower. Considering sunflower is not available during the winter months, this makes perfect sense. Looking at parameter estimates and confidence intervals for the additional habitat categories in fit1, we see that forest (category 4) is the most selected habitat, followed by shrub (category 5). This is only a simple way to look at habitat, however; we could improve on it by using more animals that were on the air for several years, and by looking at distance to habitat instead of representing habitat as categorical data.

```{r eval=FALSE}
-per1_se <- sqrt(diag(vcov(fit1)))
+per1_se <- sqrt(diag(vcov(fit4)))
# table of estimates with 95% CI
-tab_per1 <- cbind(Est = fixef(fit1), LL = fixef(fit1) - 1.96 * per1_se, UL = fixef(fit1)
+tab_per1 <- cbind(Est = fixef(fit4), LL = fixef(fit4) - 1.96 * per1_se, UL = fixef(fit4)
+ 1.96 * per1_se)
tab_per1
```
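Because these estimates are on the logit scale, exponentiating the table gives odds ratios relative to the reference category, which are often easier to interpret:

```{r eval=FALSE}
#Convert the logit-scale estimates to odds ratios
exp(tab_per1)
```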

7\. We can then create a surface of predictions from our top model, indicating where in our study site we might find the highest probability of use. To do this, we need the text file exported from our "layer" created in Exercise 8.3.

```{r eval=FALSE}
layer1 <- read.table("layer1.txt",sep=",")
names(layer1) = c("crop", "d_cover", "d_roads","x", "y")
layer1 <- read.table("data/layer1.txt",sep=",")
names(layer1) = c("crop", "elevation", "aspect","slope","x", "y","aspect_categorical")
#Standardize the raw rasters first only if the fitted model used standardized covariates
-layer1[,2:3]=scale(layer1[,2:3],scale=TRUE)
+#layer1[,2:3]=scale(layer1[,2:3],scale=TRUE)
head(layer1)
layer1$crop <- as.factor(layer1$crop)
# predictions based on best model
-predictions = predict(fit1, newdata=layer1, re.form=NA, type="link")#based on the scale of the
+predictions = predict(fit4, newdata=layer1, re.form=NA, type="link")#based on the scale of the
#linear predictors
predictions = exp(predictions)
range(predictions)
@@ -122,7 +120,7 @@ table(layer1$prediction.class)
m = SpatialPixelsDataFrame(points = layer1[c("x", "y")], data=layer1)
# names(m)
# par(mar=c(0,0,0,0))
-image(m, attr=7, col=c("grey90", "grey70", "grey50", "grey30", "grey10"))
+image(m, attr=9, col=c("grey90", "grey70", "grey50", "grey30", "grey10"))
par(lend=1)
legend("bottomright", col=rev(c("grey90", "grey70", "grey50", "grey30", "grey10")),
legend=c("High", "Medium-high", "Medium", "Medium-low", "Low"),
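The `table(layer1$prediction.class)` call in the hunk above implies the predictions were binned into five classes for mapping. A hedged sketch of one way to derive such classes, assuming the predictions vector has been attached as `layer1$predictions`; the quantile breaks are an assumption, not the exercise's published cut points:

```{r eval=FALSE}
#Sketch: bin predictions into 5 quantile classes (breaks are an assumption)
brks <- quantile(layer1$predictions, probs=seq(0, 1, 0.2), na.rm=TRUE)
layer1$prediction.class <- cut(layer1$predictions, breaks=brks,
  include.lowest=TRUE, labels=FALSE)
table(layer1$prediction.class)
```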
41 changes: 32 additions & 9 deletions MD_DataPrep.Rmd
@@ -43,11 +43,32 @@ plot(st_geometry(deer.spdf),axes=T)
5\. Only use the code in this section for the example exercise so that fewer locations are used.

```{r}
-deer.spdf <- deer.spdf[sample(nrow(deer.spdf),100),]
+#deer.spdf <- deer.spdf[sample(nrow(deer.spdf),2000),]
#Project deer.spdf to Albers as in previous exercise
deer.albers <- st_transform(deer.spdf, crs=albers.crs)
plot(st_geometry(deer.albers),axes=T)
+#Need to grab the data for the cropped dataset for use later
+#First we need a function that converts an sf data frame into a plain data
+#frame keeping all columns and adding x and y coordinate columns; adapted from:
+#https://github.com/r-spatial/sf/issues/231
+sfc_as_cols <- function(x, geometry, names = c("x","y")) {
+  if (missing(geometry)) {
+    geometry <- sf::st_geometry(x)
+  } else {
+    geometry <- rlang::eval_tidy(rlang::enquo(geometry), x)
+  }
+  stopifnot(inherits(x,"sf") && inherits(geometry,"sfc_POINT"))
+  ret <- sf::st_coordinates(geometry)
+  ret <- tibble::as_tibble(ret)
+  stopifnot(length(names) == ncol(ret))
+  x <- x[ , !names(x) %in% names]
+  ret <- setNames(ret, names)
+  dplyr::bind_cols(x, ret)
+}
+muleys <- sfc_as_cols(deer.albers, st_centroid(geometry))
```
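For POINT geometries specifically, a shorter route to the same result is to bind the coordinate matrix directly; note that `st_coordinates()` names the columns X and Y rather than x and y:

```{r eval=FALSE}
#Equivalent shortcut for POINT geometries (sketch)
muleys <- cbind(sf::st_drop_geometry(deer.albers),
  sf::st_coordinates(deer.albers))
```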

6\. If we get some NA errors because our grid does not encompass all of our deer locations, then we can expand the grid beyond our locations using methods from an earlier exercise.
@@ -147,7 +168,6 @@ for (z in length(asp)){
elev <- elev[elev$xy %in% slo$xy,]
asp <- asp[asp$xy %in% slo$xy,]
nlcd <- nlcd[nlcd$xy %in% asp$xy,]
```

11\. Combine elevation, slope, and aspect into one layer.
@@ -169,7 +189,7 @@ table(aspect_categorical)
table(is.na(aspect_categorical))
layers$aspect_categorical = aspect_categorical
head(layers)
-write.table(layers,"data/layer1.txt",sep=",",col.names=TRUE, quote=FALSE)
+#write.table(layers,"data/layer1.txt",sep=",",col.names=TRUE, quote=FALSE)
layers2 <- layers #change to layers2 simply to avoid confusion with "layer" term in function
#below
@@ -206,14 +226,14 @@ head(test)
##What is the problem here and how do we fix it?
#Need to grab Albers XY not UTM as in muleys above
-muleys <- as.data.frame(sf::st_coordinates(deer.albers))
+muleys2 <- as.data.frame(sf::st_coordinates(deer.albers))
# grab all values for used and available points based on combined layer data set
# can take 5+ minutes
-used = grab.values(layers2, muleys$X, muleys$Y)
+used = grab.values(layers2, muleys2$X, muleys2$Y)
# used$x = muleys$X
# used$y = muleys$Y
-# used$animal_id = muleys$id
+used$animal_id = muleys$id
used$use = 1
head(used)
```
@@ -223,15 +243,15 @@ head(used)
```{r warning=FALSE, message=FALSE, eval=FALSE}
#Create MCP for all locations for each deer by ID (3rd order selection).
#Lines below needed for mcp function to work
-muleys2 <- as.data.frame(deer.albers)
-deer.spdf <- SpatialPointsDataFrame(muleys,muleys2)
+muleys <- st_drop_geometry(muleys)
+deer.spdf <- SpatialPointsDataFrame(muleys2,muleys)
cp = mcp(deer.spdf[,2],percent=100)
as.data.frame(cp)
#Determine the habitat available using all code below
#First create random sample of points in each polygon
-random <- sapply(slot(cp, 'polygons'), function(i) spsample(i, n=50, type='random', offset=c(0,0)))
+random <- sapply(slot(cp, 'polygons'), function(i) spsample(i, n=100, type='random', offset=c(0,0)))
plot(cp) ; points(random[[2]], col='red', pch=3, cex=.5)#Double brackets select one polygon's points to display; rbind below stacks all polygons into a single SpatialPoints object
random.merged <- do.call('rbind', random)
#Extract the original IDs
@@ -259,6 +279,9 @@ head(available)

```{r eval=FALSE}
data = rbind(available, used)
+#Write out the example dataset
+write.csv(data,"data/MD_winter12sub.csv")
##A quick check of the data to determine if correct number of records.
#''data.frame': 200 obs. of 9 variables:
#100 locations used +
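Before writing the file, a quick tabulation (a sketch, assuming the available points carry use = 0) confirms the used/available split per animal:

```{r eval=FALSE}
#Sketch: verify the used (1) vs. available (0) balance
table(data$use)
table(data$animal_id, data$use)
```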
